Column            Type            Range / classes
repo              stringlengths   2 to 99
file              stringlengths   13 to 225
code              stringlengths   0 to 18.3M
file_length       int64           0 to 18.3M
avg_line_length   float64         0 to 1.36M
max_line_length   int64           0 to 4.26M
extension_type    stringclasses   1 value
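The schema above pairs each source file with three derived size statistics. As a rough orientation, the following minimal Python sketch shows one way such columns could be computed from a file's raw text; the helper name file_stats is hypothetical, and the dataset's exact counting conventions (for example, whether a trailing newline contributes) are not stated here, so treat the results as approximate.

def file_stats(code: str) -> dict:
    """Approximate file_length, avg_line_length and max_line_length for one file.

    Hypothetical helper for illustration only; the dataset's exact counting
    conventions are not documented in this excerpt.
    """
    lines = code.splitlines() or [""]           # treat an empty file as one empty line
    lengths = [len(line) for line in lines]
    return {
        "file_length": len(code),                        # total number of characters
        "avg_line_length": sum(lengths) / len(lengths),  # mean characters per line
        "max_line_length": max(lengths),                 # longest single line
    }


if __name__ == "__main__":
    sample = "import numpy as np\n\nprint(np.arange(3))\n"
    print(file_stats(sample))
    # expected (approximately): file_length 40, avg_line_length 12.33, max_line_length 19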
repo: scipy
file: scipy-main/scipy/optimize/tests/test_constraints.py
import pytest import numpy as np from numpy.testing import TestCase, assert_array_equal import scipy.sparse as sps from scipy.optimize._constraints import ( Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint, new_bounds_to_old, old_bound_to_new, strict_bounds) class TestStrictBounds(TestCase): def test_scalarvalue_unique_enforce_feasibility(self): m = 3 lb = 2 ub = 4 enforce_feasibility = False strict_lb, strict_ub = strict_bounds(lb, ub, enforce_feasibility, m) assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) enforce_feasibility = True strict_lb, strict_ub = strict_bounds(lb, ub, enforce_feasibility, m) assert_array_equal(strict_lb, [2, 2, 2]) assert_array_equal(strict_ub, [4, 4, 4]) def test_vectorvalue_unique_enforce_feasibility(self): m = 3 lb = [1, 2, 3] ub = [4, 5, 6] enforce_feasibility = False strict_lb, strict_ub = strict_bounds(lb, ub, enforce_feasibility, m) assert_array_equal(strict_lb, [-np.inf, -np.inf, -np.inf]) assert_array_equal(strict_ub, [np.inf, np.inf, np.inf]) enforce_feasibility = True strict_lb, strict_ub = strict_bounds(lb, ub, enforce_feasibility, m) assert_array_equal(strict_lb, [1, 2, 3]) assert_array_equal(strict_ub, [4, 5, 6]) def test_scalarvalue_vector_enforce_feasibility(self): m = 3 lb = 2 ub = 4 enforce_feasibility = [False, True, False] strict_lb, strict_ub = strict_bounds(lb, ub, enforce_feasibility, m) assert_array_equal(strict_lb, [-np.inf, 2, -np.inf]) assert_array_equal(strict_ub, [np.inf, 4, np.inf]) def test_vectorvalue_vector_enforce_feasibility(self): m = 3 lb = [1, 2, 3] ub = [4, 6, np.inf] enforce_feasibility = [True, False, True] strict_lb, strict_ub = strict_bounds(lb, ub, enforce_feasibility, m) assert_array_equal(strict_lb, [1, -np.inf, 3]) assert_array_equal(strict_ub, [4, np.inf, np.inf]) def test_prepare_constraint_infeasible_x0(): lb = np.array([0, 20, 30]) ub = np.array([0.5, np.inf, 70]) x0 = np.array([1, 2, 3]) enforce_feasibility = np.array([False, True, True], dtype=bool) bounds = Bounds(lb, ub, enforce_feasibility) pytest.raises(ValueError, PreparedConstraint, bounds, x0) pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3]) assert (pc.violation([1, 2, 3]) > 0).any() assert (pc.violation([0.25, 21, 31]) == 0).all() x0 = np.array([1, 2, 3, 4]) A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) enforce_feasibility = np.array([True, True, True], dtype=bool) linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility) pytest.raises(ValueError, PreparedConstraint, linear, x0) pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0), [1, 2, 3, 4]) assert (pc.violation([1, 2, 3, 4]) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() def fun(x): return A.dot(x) def jac(x): return A def hess(x, v): return sps.csr_matrix((4, 4)) nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess, enforce_feasibility) pytest.raises(ValueError, PreparedConstraint, nonlinear, x0) pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4]) assert (pc.violation([1, 2, 3, 4]) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() def test_violation(): def cons_f(x): return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]]) nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) pc = PreparedConstraint(nlc, [0.5, 1]) assert_array_equal(pc.violation([0.5, 1]), [0., 0.]) np.testing.assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1]) np.testing.assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0]) np.testing.assert_almost_equal(pc.violation([0.1, -1.2]), 
[0.19, 0]) np.testing.assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14]) def test_new_bounds_to_old(): lb = np.array([-np.inf, 2, 3]) ub = np.array([3, np.inf, 10]) bounds = [(None, 3), (2, None), (3, 10)] assert_array_equal(new_bounds_to_old(lb, ub, 3), bounds) bounds_single_lb = [(-1, 3), (-1, None), (-1, 10)] assert_array_equal(new_bounds_to_old(-1, ub, 3), bounds_single_lb) bounds_no_lb = [(None, 3), (None, None), (None, 10)] assert_array_equal(new_bounds_to_old(-np.inf, ub, 3), bounds_no_lb) bounds_single_ub = [(None, 20), (2, 20), (3, 20)] assert_array_equal(new_bounds_to_old(lb, 20, 3), bounds_single_ub) bounds_no_ub = [(None, None), (2, None), (3, None)] assert_array_equal(new_bounds_to_old(lb, np.inf, 3), bounds_no_ub) bounds_single_both = [(1, 2), (1, 2), (1, 2)] assert_array_equal(new_bounds_to_old(1, 2, 3), bounds_single_both) bounds_no_both = [(None, None), (None, None), (None, None)] assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3), bounds_no_both) def test_old_bounds_to_new(): bounds = ([1, 2], (None, 3), (-1, None)) lb_true = np.array([1, -np.inf, -1]) ub_true = np.array([2, 3, np.inf]) lb, ub = old_bound_to_new(bounds) assert_array_equal(lb, lb_true) assert_array_equal(ub, ub_true) bounds = [(-np.inf, np.inf), (np.array([1]), np.array([1]))] lb, ub = old_bound_to_new(bounds) assert_array_equal(lb, [-np.inf, 1]) assert_array_equal(ub, [np.inf, 1]) class TestBounds: def test_repr(self): # so that eval works from numpy import array, inf # noqa for args in ( (-1.0, 5.0), (-1.0, np.inf, True), (np.array([1.0, -np.inf]), np.array([2.0, np.inf])), (np.array([1.0, -np.inf]), np.array([2.0, np.inf]), np.array([True, False])), ): bounds = Bounds(*args) bounds2 = eval(repr(Bounds(*args))) assert_array_equal(bounds.lb, bounds2.lb) assert_array_equal(bounds.ub, bounds2.ub) assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible) def test_array(self): # gh13501 b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0]) assert isinstance(b.lb, np.ndarray) assert isinstance(b.ub, np.ndarray) def test_defaults(self): b1 = Bounds() b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf)) assert b1.lb == b2.lb assert b1.ub == b2.ub def test_input_validation(self): message = "Lower and upper bounds must be dense arrays." with pytest.raises(ValueError, match=message): Bounds(sps.coo_array([1, 2]), [1, 2]) with pytest.raises(ValueError, match=message): Bounds([1, 2], sps.coo_array([1, 2])) message = "`keep_feasible` must be a dense array." with pytest.raises(ValueError, match=message): Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True])) message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." 
with pytest.raises(ValueError, match=message): Bounds([1, 2], [1, 2, 3]) def test_residual(self): bounds = Bounds(-2, 4) x0 = [-1, 2] np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2])) class TestLinearConstraint: def test_defaults(self): A = np.eye(4) lc = LinearConstraint(A) lc2 = LinearConstraint(A, -np.inf, np.inf) assert_array_equal(lc.lb, lc2.lb) assert_array_equal(lc.ub, lc2.ub) def test_input_validation(self): A = np.eye(4) message = "`lb`, `ub`, and `keep_feasible` must be broadcastable" with pytest.raises(ValueError, match=message): LinearConstraint(A, [1, 2], [1, 2, 3]) message = "Constraint limits must be dense arrays" with pytest.raises(ValueError, match=message): LinearConstraint(A, sps.coo_array([1, 2]), [2, 3]) with pytest.raises(ValueError, match=message): LinearConstraint(A, [1, 2], sps.coo_array([2, 3])) message = "`keep_feasible` must be a dense array" with pytest.raises(ValueError, match=message): keep_feasible = sps.coo_array([True, True]) LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible) A = np.empty((4, 3, 5)) message = "`A` must have exactly two dimensions." with pytest.raises(ValueError, match=message): LinearConstraint(A) def test_residual(self): A = np.eye(2) lc = LinearConstraint(A, -2, 4) x0 = [-1, 2] np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
file_length: 9,402    avg_line_length: 35.730469    max_line_length: 77    extension_type: py
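The row above is one complete record of the table. As a purely illustrative sketch, records shaped like it could be loaded and filtered as follows; the file name rows.jsonl, the pandas dependency, and the 100-character threshold are assumptions made for the example, not part of the dataset.

# Illustrative only: load records shaped like the rows in this table and keep
# the Python files whose longest line fits within a 100-character budget.
import pandas as pd

df = pd.read_json("rows.jsonl", lines=True)   # hypothetical export of this table

py_files = df[(df["extension_type"] == "py") & (df["max_line_length"] <= 100)]
print(py_files[["file", "file_length", "avg_line_length", "max_line_length"]]
      .sort_values("file_length"))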
repo: scipy
file: scipy-main/scipy/optimize/tests/test_cobyla.py
import math

import numpy as np
from numpy.testing import assert_allclose, assert_, assert_array_equal

from scipy.optimize import fmin_cobyla, minimize, Bounds


class TestCobyla:
    def setup_method(self):
        self.x0 = [4.95, 0.66]
        self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
        self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
                     'maxiter': 100}

    def fun(self, x):
        return x[0]**2 + abs(x[1])**3

    def con1(self, x):
        return x[0]**2 + x[1]**2 - 25

    def con2(self, x):
        return -self.con1(x)

    def test_simple(self):
        # use disp=True as smoke test for gh-8118
        x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
                        rhoend=1e-5, maxfun=100, disp=True)
        assert_allclose(x, self.solution, atol=1e-4)

    def test_minimize_simple(self):
        class Callback:
            def __init__(self):
                self.n_calls = 0
                self.last_x = None

            def __call__(self, x):
                self.n_calls += 1
                self.last_x = x

        callback = Callback()

        # Minimize with method='COBYLA'
        cons = ({'type': 'ineq', 'fun': self.con1},
                {'type': 'ineq', 'fun': self.con2})
        sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
                       callback=callback, options=self.opts)
        assert_allclose(sol.x, self.solution, atol=1e-4)
        assert_(sol.success, sol.message)
        assert_(sol.maxcv < 1e-5, sol)
        assert_(sol.nfev < 70, sol)
        assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
        assert_(sol.nfev == callback.n_calls,
                "Callback is not called exactly once for every function eval.")
        assert_array_equal(sol.x, callback.last_x,
                           "Last design vector sent to the callback is not "
                           "equal to returned value.")

    def test_minimize_constraint_violation(self):
        np.random.seed(1234)
        pb = np.random.rand(10, 10)
        spread = np.random.rand(10)

        def p(w):
            return pb.dot(w)

        def f(w):
            return -(w * spread).sum()

        def c1(w):
            return 500 - abs(p(w)).sum()

        def c2(w):
            return 5 - abs(p(w).sum())

        def c3(w):
            return 5 - abs(p(w)).max()

        cons = ({'type': 'ineq', 'fun': c1},
                {'type': 'ineq', 'fun': c2},
                {'type': 'ineq', 'fun': c3})
        w0 = np.zeros((10,))
        sol = minimize(f, w0, method='cobyla', constraints=cons,
                       options={'catol': 1e-6})
        assert_(sol.maxcv > 1e-6)
        assert_(not sol.success)


def test_vector_constraints():
    # test that fmin_cobyla and minimize can take a combination
    # of constraints, some returning a number and others an array
    def fun(x):
        return (x[0] - 1)**2 + (x[1] - 2.5)**2

    def fmin(x):
        return fun(x) - 1

    def cons1(x):
        a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
        return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] + a[i, 2]
                         for i in range(len(a))])

    def cons2(x):
        return x  # identity, acts as bounds x > 0

    x0 = np.array([2, 0])
    cons_list = [fun, cons1, cons2]

    xsol = [1.4, 1.7]
    fsol = 0.8

    # testing fmin_cobyla
    sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5)
    assert_allclose(sol, xsol, atol=1e-4)

    sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5)
    assert_allclose(fun(sol), 1, atol=1e-4)

    # testing minimize
    constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
    assert_allclose(sol.x, xsol, atol=1e-4)
    assert_(sol.success, sol.message)
    assert_allclose(sol.fun, fsol, atol=1e-4)

    constraints = {'type': 'ineq', 'fun': fmin}
    sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
    assert_allclose(sol.fun, 1, atol=1e-4)


class TestBounds:
    # Test cobyla support for bounds (only when used via `minimize`)
    # Invalid bounds is tested in
    # test_optimize.TestOptimizeSimple.test_minimize_invalid_bounds

    def test_basic(self):
        def f(x):
            return np.sum(x**2)

        lb = [-1, None, 1, None, -0.5]
        ub = [-0.5, -0.5, None, None, -0.5]
        bounds = [(a, b) for a, b in zip(lb, ub)]
        # these are converted to Bounds internally

        res = minimize(f, x0=[1, 2, 3, 4, 5], method='cobyla', bounds=bounds)
        ref = [-0.5, -0.5, 1, 0, -0.5]
        assert res.success
        assert_allclose(res.x, ref, atol=1e-3)

    def test_unbounded(self):
        def f(x):
            return np.sum(x**2)

        bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])
        res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
        assert res.success
        assert_allclose(res.x, 0, atol=1e-3)

        bounds = Bounds([1, -np.inf], [np.inf, np.inf])
        res = minimize(f, x0=[1, 2], method='cobyla', bounds=bounds)
        assert res.success
        assert_allclose(res.x, [1, 0], atol=1e-3)
file_length: 5,152    avg_line_length: 30.808642    max_line_length: 101    extension_type: py
repo: scipy
file: scipy-main/scipy/optimize/tests/test__shgo.py
import logging import sys import numpy import numpy as np import time from multiprocessing import Pool from numpy.testing import assert_allclose, IS_PYPY import pytest from pytest import raises as assert_raises, warns from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen, rosen_der, rosen_hess, NonlinearConstraint) from scipy.optimize._constraints import new_constraint_to_old from scipy.optimize._shgo import SHGO class StructTestFunction: def __init__(self, bounds, expected_x, expected_fun=None, expected_xl=None, expected_funl=None): self.bounds = bounds self.expected_x = expected_x self.expected_fun = expected_fun self.expected_xl = expected_xl self.expected_funl = expected_funl def wrap_constraints(g): cons = [] if g is not None: if (type(g) is not tuple) and (type(g) is not list): g = (g,) else: pass for g in g: cons.append({'type': 'ineq', 'fun': g}) cons = tuple(cons) else: cons = None return cons class StructTest1(StructTestFunction): def f(self, x): return x[0] ** 2 + x[1] ** 2 def g(x): return -(numpy.sum(x, axis=0) - 6.0) cons = wrap_constraints(g) test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)], expected_x=[0, 0]) test1_2 = StructTest1(bounds=[(0, 1), (0, 1)], expected_x=[0, 0]) test1_3 = StructTest1(bounds=[(None, None), (None, None)], expected_x=[0, 0]) class StructTest2(StructTestFunction): """ Scalar function with several minima to test all minimiser retrievals """ def f(self, x): return (x - 30) * numpy.sin(x) def g(x): return 58 - numpy.sum(x, axis=0) cons = wrap_constraints(g) test2_1 = StructTest2(bounds=[(0, 60)], expected_x=[1.53567906], expected_fun=-28.44677132, # Important: test that funl return is in the correct # order expected_xl=numpy.array([[1.53567906], [55.01782167], [7.80894889], [48.74797493], [14.07445705], [42.4913859], [20.31743841], [36.28607535], [26.43039605], [30.76371366]]), expected_funl=numpy.array([-28.44677132, -24.99785984, -22.16855376, -18.72136195, -15.89423937, -12.45154942, -9.63133158, -6.20801301, -3.43727232, -0.46353338]) ) test2_2 = StructTest2(bounds=[(0, 4.5)], expected_x=[1.53567906], expected_fun=[-28.44677132], expected_xl=numpy.array([[1.53567906]]), expected_funl=numpy.array([-28.44677132]) ) class StructTest3(StructTestFunction): """ Hock and Schittkowski 18 problem (HS18). Hoch and Schittkowski (1981) http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf Minimize: f = 0.01 * (x_1)**2 + (x_2)**2 Subject to: x_1 * x_2 - 25.0 >= 0, (x_1)**2 + (x_2)**2 - 25.0 >= 0, 2 <= x_1 <= 50, 0 <= x_2 <= 50. Approx. Answer: f([(250)**0.5 , (2.5)**0.5]) = 5.0 """ # amended to test vectorisation of constraints def f(self, x): return 0.01 * (x[0]) ** 2 + (x[1]) ** 2 def g1(x): return x[0] * x[1] - 25.0 def g2(x): return x[0] ** 2 + x[1] ** 2 - 25.0 # g = (g1, g2) # cons = wrap_constraints(g) def g(x): return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0 # this checks that shgo can be sent new-style constraints __nlc = NonlinearConstraint(g, 0, np.inf) cons = (__nlc,) test3_1 = StructTest3(bounds=[(2, 50), (0, 50)], expected_x=[250 ** 0.5, 2.5 ** 0.5], expected_fun=5.0 ) class StructTest4(StructTestFunction): """ Hock and Schittkowski 11 problem (HS11). Hoch and Schittkowski (1981) NOTE: Did not find in original reference to HS collection, refer to Henderson (2015) problem 7 instead. 
02.03.2016 """ def f(self, x): return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4 + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[ 6] ** 4 - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6] ) def g1(x): return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2 + 5 * x[4] - 127) def g2(x): return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0) def g3(x): return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196) def g4(x): return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2 + 5 * x[5] - 11 * x[6]) g = (g1, g2, g3, g4) cons = wrap_constraints(g) test4_1 = StructTest4(bounds=[(-10, 10), ] * 7, expected_x=[2.330499, 1.951372, -0.4775414, 4.365726, -0.6244870, 1.038131, 1.594227], expected_fun=680.6300573 ) class StructTest5(StructTestFunction): def f(self, x): return (-(x[1] + 47.0) * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) ) g = None cons = wrap_constraints(g) test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)], expected_fun=[-959.64066272085051], expected_x=[512., 404.23180542]) class StructTestLJ(StructTestFunction): """ LennardJones objective function. Used to test symmetry constraints settings. """ def f(self, x, *args): print(f'x = {x}') self.N = args[0] k = int(self.N / 3) s = 0.0 for i in range(k - 1): for j in range(i + 1, k): a = 3 * i b = 3 * j xd = x[a] - x[b] yd = x[a + 1] - x[b + 1] zd = x[a + 2] - x[b + 2] ed = xd * xd + yd * yd + zd * zd ud = ed * ed * ed if ed > 0.0: s += (1.0 / ud - 2.0) / ud return s g = None cons = wrap_constraints(g) N = 6 boundsLJ = list(zip([-4.0] * 6, [4.0] * 6)) testLJ = StructTestLJ(bounds=boundsLJ, expected_fun=[-1.0], expected_x=None, # expected_x=[-2.71247337e-08, # -2.71247337e-08, # -2.50000222e+00, # -2.71247337e-08, # -2.71247337e-08, # -1.50000222e+00] ) class StructTestS(StructTestFunction): def f(self, x): return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2 + (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2) g = None cons = wrap_constraints(g) test_s = StructTestS(bounds=[(0, 2.0), ] * 4, expected_fun=0.0, expected_x=numpy.ones(4) - 0.5 ) class StructTestTable(StructTestFunction): def f(self, x): if x[0] == 3.0 and x[1] == 3.0: return 50 else: return 100 g = None cons = wrap_constraints(g) test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)], expected_fun=[50], expected_x=[3.0, 3.0]) class StructTestInfeasible(StructTestFunction): """ Test function with no feasible domain. 
""" def f(self, x, *args): return x[0] ** 2 + x[1] ** 2 def g1(x): return x[0] + x[1] - 1 def g2(x): return -(x[0] + x[1] - 1) def g3(x): return -x[0] + x[1] - 1 def g4(x): return -(-x[0] + x[1] - 1) g = (g1, g2, g3, g4) cons = wrap_constraints(g) test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)], expected_fun=None, expected_x=None ) @pytest.mark.skip("Not a test") def run_test(test, args=(), test_atol=1e-5, n=100, iters=None, callback=None, minimizer_kwargs=None, options=None, sampling_method='sobol', workers=1): res = shgo(test.f, test.bounds, args=args, constraints=test.cons, n=n, iters=iters, callback=callback, minimizer_kwargs=minimizer_kwargs, options=options, sampling_method=sampling_method, workers=workers) print(f'res = {res}') logging.info(f'res = {res}') if test.expected_x is not None: numpy.testing.assert_allclose(res.x, test.expected_x, rtol=test_atol, atol=test_atol) # (Optional tests) if test.expected_fun is not None: numpy.testing.assert_allclose(res.fun, test.expected_fun, atol=test_atol) if test.expected_xl is not None: numpy.testing.assert_allclose(res.xl, test.expected_xl, atol=test_atol) if test.expected_funl is not None: numpy.testing.assert_allclose(res.funl, test.expected_funl, atol=test_atol) return # Base test functions: class TestShgoSobolTestFunctions: """ Global optimisation tests with Sobol sampling: """ # Sobol algorithm def test_f1_1_sobol(self): """Multivariate test function 1: x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" run_test(test1_1) def test_f1_2_sobol(self): """Multivariate test function 1: x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" run_test(test1_2) def test_f1_3_sobol(self): """Multivariate test function 1: x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]""" options = {'disp': True} run_test(test1_3, options=options) def test_f2_1_sobol(self): """Univariate test function on f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" run_test(test2_1) def test_f2_2_sobol(self): """Univariate test function on f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" run_test(test2_2) def test_f3_sobol(self): """NLP: Hock and Schittkowski problem 18""" run_test(test3_1) @pytest.mark.slow def test_f4_sobol(self): """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" options = {'infty_constraints': False} # run_test(test4_1, n=990, options=options) run_test(test4_1, n=990 * 2, options=options) def test_f5_1_sobol(self): """NLP: Eggholder, multimodal""" # run_test(test5_1, n=30) run_test(test5_1, n=60) def test_f5_2_sobol(self): """NLP: Eggholder, multimodal""" # run_test(test5_1, n=60, iters=5) run_test(test5_1, n=60, iters=5) # def test_t911(self): # """1D tabletop function""" # run_test(test11_1) class TestShgoSimplicialTestFunctions: """ Global optimisation tests with Simplicial sampling: """ def test_f1_1_simplicial(self): """Multivariate test function 1: x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]""" run_test(test1_1, n=1, sampling_method='simplicial') def test_f1_2_simplicial(self): """Multivariate test function 1: x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]""" run_test(test1_2, n=1, sampling_method='simplicial') def test_f1_3_simplicial(self): """Multivariate test function 1: x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]""" run_test(test1_3, n=5, sampling_method='simplicial') def test_f2_1_simplicial(self): """Univariate test function on f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]""" options = {'minimize_every_iter': False} run_test(test2_1, n=200, iters=7, options=options, 
sampling_method='simplicial') def test_f2_2_simplicial(self): """Univariate test function on f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]""" run_test(test2_2, n=1, sampling_method='simplicial') def test_f3_simplicial(self): """NLP: Hock and Schittkowski problem 18""" run_test(test3_1, n=1, sampling_method='simplicial') @pytest.mark.slow def test_f4_simplicial(self): """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)""" run_test(test4_1, n=1, sampling_method='simplicial') def test_lj_symmetry_old(self): """LJ: Symmetry-constrained test function""" options = {'symmetry': True, 'disp': True} args = (6,) # Number of atoms run_test(testLJ, args=args, n=300, options=options, iters=1, sampling_method='simplicial') def test_f5_1_lj_symmetry(self): """LJ: Symmetry constrained test function""" options = {'symmetry': [0, ] * 6, 'disp': True} args = (6,) # No. of atoms run_test(testLJ, args=args, n=300, options=options, iters=1, sampling_method='simplicial') def test_f5_2_cons_symmetry(self): """Symmetry constrained test function""" options = {'symmetry': [0, 0], 'disp': True} run_test(test1_1, n=200, options=options, iters=1, sampling_method='simplicial') def test_f5_3_cons_symmetry(self): """Assymmetrically constrained test function""" options = {'symmetry': [0, 0, 0, 3], 'disp': True} run_test(test_s, n=10000, options=options, iters=1, sampling_method='simplicial') @pytest.mark.skip("Not a test") def test_f0_min_variance(self): """Return a minimum on a perfectly symmetric problem, based on gh10429""" avg = 0.5 # Given average value of x cons = {'type': 'eq', 'fun': lambda x: numpy.mean(x) - avg} # Minimize the variance of x under the given constraint res = shgo(numpy.var, bounds=6 * [(0, 1)], constraints=cons) assert res.success assert_allclose(res.fun, 0, atol=1e-15) assert_allclose(res.x, 0.5) @pytest.mark.skip("Not a test") def test_f0_min_variance_1D(self): """Return a minimum on a perfectly symmetric 1D problem, based on gh10538""" def fun(x): return x * (x - 1.0) * (x - 0.5) bounds = [(0, 1)] res = shgo(fun, bounds=bounds) ref = minimize_scalar(fun, bounds=bounds[0]) assert res.success assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x, rtol=1e-6) # Argument test functions class TestShgoArguments: def test_1_1_simpl_iter(self): """Iterative simplicial sampling on TestFunction 1 (multivariate)""" run_test(test1_2, n=None, iters=2, sampling_method='simplicial') def test_1_2_simpl_iter(self): """Iterative simplicial on TestFunction 2 (univariate)""" options = {'minimize_every_iter': False} run_test(test2_1, n=None, iters=9, options=options, sampling_method='simplicial') def test_2_1_sobol_iter(self): """Iterative Sobol sampling on TestFunction 1 (multivariate)""" run_test(test1_2, n=None, iters=1, sampling_method='sobol') def test_2_2_sobol_iter(self): """Iterative Sobol sampling on TestFunction 2 (univariate)""" res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, n=None, iters=1, sampling_method='sobol') numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5) numpy.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5) def test_3_1_disp_simplicial(self): """Iterative sampling on TestFunction 1 and 2 (multi and univariate) """ def callback_func(x): print("Local minimization callback test") for test in [test1_1, test2_1]: shgo(test.f, test.bounds, iters=1, sampling_method='simplicial', callback=callback_func, options={'disp': True}) shgo(test.f, test.bounds, n=1, sampling_method='simplicial', callback=callback_func, 
options={'disp': True}) def test_3_2_disp_sobol(self): """Iterative sampling on TestFunction 1 and 2 (multi and univariate)""" def callback_func(x): print("Local minimization callback test") for test in [test1_1, test2_1]: shgo(test.f, test.bounds, iters=1, sampling_method='sobol', callback=callback_func, options={'disp': True}) shgo(test.f, test.bounds, n=1, sampling_method='simplicial', callback=callback_func, options={'disp': True}) def test_args_gh14589(self): """Using `args` used to cause `shgo` to fail; see #14589, #15986, #16506""" res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2) ) ref = shgo(func=lambda x: 2 * x + 1, bounds=[(0, 3)]) assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x) @pytest.mark.slow def test_4_1_known_f_min(self): """Test known function minima stopping criteria""" # Specify known function value options = {'f_min': test4_1.expected_fun, 'f_tol': 1e-6, 'minimize_every_iter': True} # TODO: Make default n higher for faster tests run_test(test4_1, n=None, test_atol=1e-5, options=options, sampling_method='simplicial') @pytest.mark.slow def test_4_2_known_f_min(self): """Test Global mode limiting local evalutions""" options = { # Specify known function value 'f_min': test4_1.expected_fun, 'f_tol': 1e-6, # Specify number of local iterations to perform 'minimize_every_iter': True, 'local_iter': 1} run_test(test4_1, n=None, test_atol=1e-5, options=options, sampling_method='simplicial') def test_4_4_known_f_min(self): """Test Global mode limiting local evaluations for 1D funcs""" options = { # Specify known function value 'f_min': test2_1.expected_fun, 'f_tol': 1e-6, # Specify number of local iterations to perform+ 'minimize_every_iter': True, 'local_iter': 1, 'infty_constraints': False} res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons, n=None, iters=None, options=options, sampling_method='sobol') numpy.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5) def test_5_1_simplicial_argless(self): """Test Default simplicial sampling settings on TestFunction 1""" res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons) numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5) def test_5_2_sobol_argless(self): """Test Default sobol sampling settings on TestFunction 1""" res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons, sampling_method='sobol') numpy.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5) def test_6_1_simplicial_max_iter(self): """Test that maximum iteration option works on TestFunction 3""" options = {'max_iter': 2} res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, options=options, sampling_method='simplicial') numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5) numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) def test_6_2_simplicial_min_iter(self): """Test that maximum iteration option works on TestFunction 3""" options = {'min_iter': 2} res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons, options=options, sampling_method='simplicial') numpy.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5) numpy.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5) def test_7_1_minkwargs(self): """Test the minimizer_kwargs arguments for solvers with constraints""" # Test solvers for solver in ['COBYLA', 'SLSQP']: # Note that passing global constraints to SLSQP is tested in other # unittests which run test4_1 normally minimizer_kwargs = 
{'method': solver, 'constraints': test3_1.cons} run_test(test3_1, n=100, test_atol=1e-3, minimizer_kwargs=minimizer_kwargs, sampling_method='sobol') def test_7_2_minkwargs(self): """Test the minimizer_kwargs default inits""" minimizer_kwargs = {'ftol': 1e-5} options = {'disp': True} # For coverage purposes SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0], minimizer_kwargs=minimizer_kwargs, options=options) def test_7_3_minkwargs(self): """Test minimizer_kwargs arguments for solvers without constraints""" for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov']: def jac(x): return numpy.array([2 * x[0], 2 * x[1]]).T def hess(x): return numpy.array([[2, 0], [0, 2]]) minimizer_kwargs = {'method': solver, 'jac': jac, 'hess': hess} logging.info(f"Solver = {solver}") logging.info("=" * 100) run_test(test1_1, n=100, test_atol=1e-3, minimizer_kwargs=minimizer_kwargs, sampling_method='sobol') def test_8_homology_group_diff(self): options = {'minhgrd': 1, 'minimize_every_iter': True} run_test(test1_1, n=None, iters=None, options=options, sampling_method='simplicial') def test_9_cons_g(self): """Test single function constraint passing""" SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0]) @pytest.mark.xfail(IS_PYPY and sys.platform == 'win32', reason="Failing and fix in PyPy not planned (see gh-18632)") def test_10_finite_time(self): """Test single function constraint passing""" options = {'maxtime': 1e-15} def f(x): time.sleep(1e-14) return 0.0 res = shgo(f, test1_1.bounds, iters=5, options=options) # Assert that only 1 rather than 5 requested iterations ran: assert res.nit == 1 def test_11_f_min_0(self): """Test to cover the case where f_lowest == 0""" options = {'f_min': 0.0, 'disp': True} res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None, options=options, sampling_method='sobol') numpy.testing.assert_equal(0, res.x[0]) numpy.testing.assert_equal(0, res.x[1]) # @nottest @pytest.mark.skip(reason="no way of currently testing this") def test_12_sobol_inf_cons(self): """Test to cover the case where f_lowest == 0""" # TODO: This test doesn't cover anything new, it is unknown what the # original test was intended for as it was never complete. Delete or # replace in the future. 
options = {'maxtime': 1e-15, 'f_min': 0.0} res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None, options=options, sampling_method='sobol') numpy.testing.assert_equal(0.0, res.fun) def test_13_high_sobol(self): """Test init of high-dimensional sobol sequences""" def f(x): return 0 bounds = [(None, None), ] * 41 SHGOc = SHGO(f, bounds, sampling_method='sobol') # SHGOc.sobol_points(2, 50) SHGOc.sampling_function(2, 50) def test_14_local_iter(self): """Test limited local iterations for a pseudo-global mode""" options = {'local_iter': 4} run_test(test5_1, n=60, options=options) def test_15_min_every_iter(self): """Test minimize every iter options and cover function cache""" options = {'minimize_every_iter': True} run_test(test1_1, n=1, iters=7, options=options, sampling_method='sobol') def test_16_disp_bounds_minimizer(self): """Test disp=True with minimizers that do not support bounds """ options = {'disp': True} minimizer_kwargs = {'method': 'nelder-mead'} run_test(test1_2, sampling_method='simplicial', options=options, minimizer_kwargs=minimizer_kwargs) def test_17_custom_sampling(self): """Test the functionality to add custom sampling methods to shgo""" def sample(n, d): return numpy.random.uniform(size=(n, d)) run_test(test1_1, n=30, sampling_method=sample) def test_18_bounds_class(self): # test that new and old bounds yield same result def f(x): return numpy.square(x).sum() lb = [-6., 1., -5.] ub = [-1., 3., 5.] bounds_old = list(zip(lb, ub)) bounds_new = Bounds(lb, ub) res_old_bounds = shgo(f, bounds_old) res_new_bounds = shgo(f, bounds_new) assert res_new_bounds.nfev == res_old_bounds.nfev assert res_new_bounds.message == res_old_bounds.message assert res_new_bounds.success == res_old_bounds.success x_opt = numpy.array([-1., 1., 0.]) numpy.testing.assert_allclose(res_new_bounds.x, x_opt) numpy.testing.assert_allclose(res_new_bounds.x, res_old_bounds.x) def test_19_parallelization(self): """Test the functionality to add custom sampling methods to shgo""" with Pool(2) as p: run_test(test1_1, n=30, workers=p.map) # Constrained run_test(test1_1, n=30, workers=map) # Constrained with Pool(2) as p: run_test(test_s, n=30, workers=p.map) # Unconstrained run_test(test_s, n=30, workers=map) # Unconstrained def test_20_constrained_args(self): """Test that constraints can be passed to arguments""" def eggholder(x): return (-(x[1] + 47.0) * numpy.sin(numpy.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0)))) - x[0] * numpy.sin(numpy.sqrt(abs(x[0] - (x[1] + 47.0)))) ) def f(x): # (cattle-feed) return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3] bounds = [(0, 1.0), ] * 4 def g1_modified(x, i): return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[ 3] - 5 # >=0 def g2(x): return (12 * x[0] + 11.9 * x[1] + 41.8 * x[2] + 52.1 * x[3] - 21 - 1.645 * numpy.sqrt(0.28 * x[0] ** 2 + 0.19 * x[1] ** 2 + 20.5 * x[2] ** 2 + 0.62 * x[3] ** 2) ) # >=0 def h1(x): return x[0] + x[1] + x[2] + x[3] - 1 # == 0 cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)}, {'type': 'ineq', 'fun': g2}, {'type': 'eq', 'fun': h1}) shgo(f, bounds, n=300, iters=1, constraints=cons) # using constrain with arguments AND sampling method sobol shgo(f, bounds, n=300, iters=1, constraints=cons, sampling_method='sobol') def test_21_1_jac_true(self): """Test that shgo can handle objective functions that return the gradient alongside the objective value. 
Fixes gh-13547""" # previous def func(x): return numpy.sum(numpy.power(x, 2)), 2 * x shgo( func, bounds=[[-1, 1], [1, 2]], n=100, iters=5, sampling_method="sobol", minimizer_kwargs={'method': 'SLSQP', 'jac': True} ) # new def func(x): return numpy.sum(x ** 2), 2 * x bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]] res = shgo(func, bounds=bounds, sampling_method="sobol", minimizer_kwargs={'method': 'SLSQP', 'jac': True}) ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds, jac=True) assert res.success assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x, atol=1e-15) @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp']) def test_21_2_derivative_options(self, derivative): """shgo used to raise an error when passing `options` with 'jac' # see gh-12963. check that this is resolved """ def objective(x): return 3 * x[0] * x[0] + 2 * x[0] + 5 def gradient(x): return 6 * x[0] + 2 def hess(x): return 6 def hessp(x, p): return 6 * p derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp} options = {derivative: derivative_funcs[derivative]} minimizer_kwargs = {'method': 'trust-constr'} bounds = [(-100, 100)] res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs, options=options) ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs, **options) assert res.success numpy.testing.assert_allclose(res.fun, ref.fun) numpy.testing.assert_allclose(res.x, ref.x) def test_21_3_hess_options_rosen(self): """Ensure the Hessian gets passed correctly to the local minimizer routine. Previous report gh-14533. """ bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)] options = {'jac': rosen_der, 'hess': rosen_hess} minimizer_kwargs = {'method': 'Newton-CG'} res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs, options=options) ref = minimize(rosen, numpy.zeros(5), method='Newton-CG', **options) assert res.success assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x, atol=1e-15) def test_21_arg_tuple_sobol(self): """shgo used to raise an error when passing `args` with Sobol sampling # see gh-12114. 
check that this is resolved""" def fun(x, k): return x[0] ** k constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1}) bounds = [(0, 10)] res = shgo(fun, bounds, args=(1,), constraints=constraints, sampling_method='sobol') ref = minimize(fun, numpy.zeros(1), bounds=bounds, args=(1,), constraints=constraints) assert res.success assert_allclose(res.fun, ref.fun) assert_allclose(res.x, ref.x) # Failure test functions class TestShgoFailures: def test_1_maxiter(self): """Test failure on insufficient iterations""" options = {'maxiter': 2} res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None, options=options, sampling_method='sobol') numpy.testing.assert_equal(False, res.success) # numpy.testing.assert_equal(4, res.nfev) numpy.testing.assert_equal(4, res.tnev) def test_2_sampling(self): """Rejection of unknown sampling method""" assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds, sampling_method='not_Sobol') def test_3_1_no_min_pool_sobol(self): """Check that the routine stops when no minimiser is found after maximum specified function evaluations""" options = {'maxfev': 10, # 'maxev': 10, 'disp': True} res = shgo(test_table.f, test_table.bounds, n=3, options=options, sampling_method='sobol') numpy.testing.assert_equal(False, res.success) # numpy.testing.assert_equal(9, res.nfev) numpy.testing.assert_equal(12, res.nfev) def test_3_2_no_min_pool_simplicial(self): """Check that the routine stops when no minimiser is found after maximum specified sampling evaluations""" options = {'maxev': 10, 'disp': True} res = shgo(test_table.f, test_table.bounds, n=3, options=options, sampling_method='simplicial') numpy.testing.assert_equal(False, res.success) def test_4_1_bound_err(self): """Specified bounds ub > lb""" bounds = [(6, 3), (3, 5)] assert_raises(ValueError, shgo, test1_1.f, bounds) def test_4_2_bound_err(self): """Specified bounds are of the form (lb, ub)""" bounds = [(3, 5, 5), (3, 5)] assert_raises(ValueError, shgo, test1_1.f, bounds) def test_5_1_1_infeasible_sobol(self): """Ensures the algorithm terminates on infeasible problems after maxev is exceeded. Use infty constraints option""" options = {'maxev': 100, 'disp': True} res = shgo(test_infeasible.f, test_infeasible.bounds, constraints=test_infeasible.cons, n=100, options=options, sampling_method='sobol') numpy.testing.assert_equal(False, res.success) def test_5_1_2_infeasible_sobol(self): """Ensures the algorithm terminates on infeasible problems after maxev is exceeded. 
Do not use infty constraints option""" options = {'maxev': 100, 'disp': True, 'infty_constraints': False} res = shgo(test_infeasible.f, test_infeasible.bounds, constraints=test_infeasible.cons, n=100, options=options, sampling_method='sobol') numpy.testing.assert_equal(False, res.success) def test_5_2_infeasible_simplicial(self): """Ensures the algorithm terminates on infeasible problems after maxev is exceeded.""" options = {'maxev': 1000, 'disp': False} res = shgo(test_infeasible.f, test_infeasible.bounds, constraints=test_infeasible.cons, n=100, options=options, sampling_method='simplicial') numpy.testing.assert_equal(False, res.success) def test_6_1_lower_known_f_min(self): """Test Global mode limiting local evaluations with f* too high""" options = { # Specify known function value 'f_min': test2_1.expected_fun + 2.0, 'f_tol': 1e-6, # Specify number of local iterations to perform+ 'minimize_every_iter': True, 'local_iter': 1, 'infty_constraints': False} args = (test2_1.f, test2_1.bounds) kwargs = {'constraints': test2_1.cons, 'n': None, 'iters': None, 'options': options, 'sampling_method': 'sobol' } warns(UserWarning, shgo, *args, **kwargs) def test(self): from scipy.optimize import rosen, shgo bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] def fun(x): fun.nfev += 1 return rosen(x) fun.nfev = 0 result = shgo(fun, bounds) print(result.x, result.fun, fun.nfev) # 50 # Returns class TestShgoReturns: def test_1_nfev_simplicial(self): bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] def fun(x): fun.nfev += 1 return rosen(x) fun.nfev = 0 result = shgo(fun, bounds) numpy.testing.assert_equal(fun.nfev, result.nfev) def test_1_nfev_sobol(self): bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)] def fun(x): fun.nfev += 1 return rosen(x) fun.nfev = 0 result = shgo(fun, bounds, sampling_method='sobol') numpy.testing.assert_equal(fun.nfev, result.nfev) def test_vector_constraint(): # gh15514 def quad(x): x = np.asarray(x) return [np.sum(x ** 2)] nlc = NonlinearConstraint(quad, [2.2], [3]) oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0])) res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol') assert np.all(np.sum((res.x)**2) >= 2.2) assert np.all(np.sum((res.x) ** 2) <= 3.0) assert res.success @pytest.mark.filterwarnings("ignore:delta_grad") def test_trust_constr(): def quad(x): x = np.asarray(x) return [np.sum(x ** 2)] nlc = NonlinearConstraint(quad, [2.6], [3]) minimizer_kwargs = {'method': 'trust-constr'} # note that we don't supply the constraints in minimizer_kwargs, # so if the final result obeys the constraints we know that shgo # passed them on to 'trust-constr' res = shgo( rosen, [(0, 10), (0, 10)], constraints=nlc, sampling_method='sobol', minimizer_kwargs=minimizer_kwargs ) assert np.all(np.sum((res.x)**2) >= 2.6) assert np.all(np.sum((res.x) ** 2) <= 3.0) assert res.success def test_equality_constraints(): # gh16260 bounds = [(0.9, 4.0)] * 2 # Constrain probabilities to 0 and 1. def faulty(x): return x[0] + x[1] nlc = NonlinearConstraint(faulty, 3.9, 3.9) res = shgo(rosen, bounds=bounds, constraints=nlc) assert_allclose(np.sum(res.x), 3.9) def faulty(x): return x[0] + x[1] - 3.9 constraints = {'type': 'eq', 'fun': faulty} res = shgo(rosen, bounds=bounds, constraints=constraints) assert_allclose(np.sum(res.x), 3.9) bounds = [(0, 1.0)] * 4 # sum of variable should equal 1. 
def faulty(x): return x[0] + x[1] + x[2] + x[3] - 1 # options = {'minimize_every_iter': True, 'local_iter':10} constraints = {'type': 'eq', 'fun': faulty} res = shgo( lambda x: - np.prod(x), bounds=bounds, constraints=constraints, sampling_method='sobol' ) assert_allclose(np.sum(res.x), 1.0) def test_gh16971(): def cons(x): return np.sum(x**2) - 0 c = {'fun': cons, 'type': 'ineq'} minimizer_kwargs = { 'method': 'COBYLA', 'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05} } s = SHGO( rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs ) assert s.minimizer_kwargs['method'].lower() == 'cobyla' assert s.minimizer_kwargs['options']['catol'] == 0.05
file_length: 40,298    avg_line_length: 33.740517    max_line_length: 84    extension_type: py
repo: scipy
file: scipy-main/scipy/optimize/tests/test_linesearch.py
""" Tests for line search routines """ from numpy.testing import (assert_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_warns, suppress_warnings) import scipy.optimize._linesearch as ls from scipy.optimize._linesearch import LineSearchWarning import numpy as np def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""): """ Check that strong Wolfe conditions apply """ phi1 = phi(s) phi0 = phi(0) derphi0 = derphi(0) derphi1 = derphi(s) msg = "s = {}; phi(0) = {}; phi(s) = {}; phi'(0) = {}; phi'(s) = {}; {}".format( s, phi0, phi1, derphi0, derphi1, err_msg) assert phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg assert abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg def assert_armijo(s, phi, c1=1e-4, err_msg=""): """ Check that Armijo condition applies """ phi1 = phi(s) phi0 = phi(0) msg = f"s = {s}; phi(0) = {phi0}; phi(s) = {phi1}; {err_msg}" assert phi1 <= (1 - c1*s)*phi0, msg def assert_line_wolfe(x, p, s, f, fprime, **kw): assert_wolfe(s, phi=lambda sp: f(x + p*sp), derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw) def assert_line_armijo(x, p, s, f, **kw): assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw) def assert_fp_equal(x, y, err_msg="", nulp=50): """Assert two arrays are equal, up to some floating-point rounding error""" try: assert_array_almost_equal_nulp(x, y, nulp) except AssertionError as e: raise AssertionError(f"{e}\n{err_msg}") from e class TestLineSearch: # -- scalar functions; must have dphi(0.) < 0 def _scalar_func_1(self, s): # skip name check self.fcount += 1 p = -s - s**3 + s**4 dp = -1 - 3*s**2 + 4*s**3 return p, dp def _scalar_func_2(self, s): # skip name check self.fcount += 1 p = np.exp(-4*s) + s**2 dp = -4*np.exp(-4*s) + 2*s return p, dp def _scalar_func_3(self, s): # skip name check self.fcount += 1 p = -np.sin(10*s) dp = -10*np.cos(10*s) return p, dp # -- n-d functions def _line_func_1(self, x): # skip name check self.fcount += 1 f = np.dot(x, x) df = 2*x return f, df def _line_func_2(self, x): # skip name check self.fcount += 1 f = np.dot(x, np.dot(self.A, x)) + 1 df = np.dot(self.A + self.A.T, x) return f, df # -- def setup_method(self): self.scalar_funcs = [] self.line_funcs = [] self.N = 20 self.fcount = 0 def bind_index(func, idx): # Remember Python's closure semantics! return lambda *a, **kw: func(*a, **kw)[idx] for name in sorted(dir(self)): if name.startswith('_scalar_func_'): value = getattr(self, name) self.scalar_funcs.append( (name, bind_index(value, 0), bind_index(value, 1))) elif name.startswith('_line_func_'): value = getattr(self, name) self.line_funcs.append( (name, bind_index(value, 0), bind_index(value, 1))) np.random.seed(1234) self.A = np.random.randn(self.N, self.N) def scalar_iter(self): for name, phi, derphi in self.scalar_funcs: for old_phi0 in np.random.randn(3): yield name, phi, derphi, old_phi0 def line_iter(self): for name, f, fprime in self.line_funcs: k = 0 while k < 9: x = np.random.randn(self.N) p = np.random.randn(self.N) if np.dot(p, fprime(x)) >= 0: # always pick a descent direction continue k += 1 old_fv = float(np.random.randn()) yield name, f, fprime, x, p, old_fv # -- Generic scalar searches def test_scalar_search_wolfe1(self): c = 0 for name, phi, derphi, old_phi0 in self.scalar_iter(): c += 1 s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0), old_phi0, derphi(0)) assert_fp_equal(phi0, phi(0), name) assert_fp_equal(phi1, phi(s), name) assert_wolfe(s, phi, derphi, err_msg=name) assert c > 3 # check that the iterator really works... 
def test_scalar_search_wolfe2(self): for name, phi, derphi, old_phi0 in self.scalar_iter(): s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2( phi, derphi, phi(0), old_phi0, derphi(0)) assert_fp_equal(phi0, phi(0), name) assert_fp_equal(phi1, phi(s), name) if derphi1 is not None: assert_fp_equal(derphi1, derphi(s), name) assert_wolfe(s, phi, derphi, err_msg=f"{name} {old_phi0:g}") def test_scalar_search_wolfe2_with_low_amax(self): def phi(alpha): return (alpha - 5) ** 2 def derphi(alpha): return 2 * (alpha - 5) s, _, _, _ = assert_warns(LineSearchWarning, ls.scalar_search_wolfe2, phi, derphi, amax=0.001) assert s is None def test_scalar_search_wolfe2_regression(self): # Regression test for gh-12157 # This phi has its minimum at alpha=4/3 ~ 1.333. def phi(alpha): if alpha < 1: return - 3*np.pi/2 * (alpha - 1) else: return np.cos(3*np.pi/2 * alpha - np.pi) def derphi(alpha): if alpha < 1: return - 3*np.pi/2 else: return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi) s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi) # Without the fix in gh-13073, the scalar_search_wolfe2 # returned s=2.0 instead. assert s < 1.5 def test_scalar_search_armijo(self): for name, phi, derphi, old_phi0 in self.scalar_iter(): s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0)) assert_fp_equal(phi1, phi(s), name) assert_armijo(s, phi, err_msg=f"{name} {old_phi0:g}") # -- Generic line searches def test_line_search_wolfe1(self): c = 0 smax = 100 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p, g0, f0, old_f, amax=smax) assert_equal(self.fcount, fc+gc) assert_fp_equal(ofv, f(x)) if s is None: continue assert_fp_equal(fv, f(x + s*p)) assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) if s < smax: c += 1 assert_line_wolfe(x, p, s, f, fprime, err_msg=name) assert c > 3 # check that the iterator really works... def test_line_search_wolfe2(self): c = 0 smax = 512 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 with suppress_warnings() as sup: sup.filter(LineSearchWarning, "The line search algorithm could not find a solution") sup.filter(LineSearchWarning, "The line search algorithm did not converge") s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p, g0, f0, old_f, amax=smax) assert_equal(self.fcount, fc+gc) assert_fp_equal(ofv, f(x)) assert_fp_equal(fv, f(x + s*p)) if gv is not None: assert_array_almost_equal(gv, fprime(x + s*p), decimal=14) if s < smax: c += 1 assert_line_wolfe(x, p, s, f, fprime, err_msg=name) assert c > 3 # check that the iterator really works... 
def test_line_search_wolfe2_bounds(self): # See gh-7475 # For this f and p, starting at a point on axis 0, the strong Wolfe # condition 2 is met if and only if the step length s satisfies # |x + s| <= c2 * |x| def f(x): return np.dot(x, x) def fp(x): return 2 * x p = np.array([1, 0]) # Smallest s satisfying strong Wolfe conditions for these arguments is 30 x = -60 * p c2 = 0.5 s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2) assert_line_wolfe(x, p, s, f, fp) s, _, _, _, _, _ = assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, amax=29, c2=c2) assert s is None # s=30 will only be tried on the 6th iteration, so this won't converge assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p, c2=c2, maxiter=5) def test_line_search_armijo(self): c = 0 for name, f, fprime, x, p, old_f in self.line_iter(): f0 = f(x) g0 = fprime(x) self.fcount = 0 s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0) c += 1 assert_equal(self.fcount, fc) assert_fp_equal(fv, f(x + s*p)) assert_line_armijo(x, p, s, f, err_msg=name) assert c >= 9 # -- More specific tests def test_armijo_terminate_1(self): # Armijo should evaluate the function only once if the trial step # is already suitable count = [0] def phi(s): count[0] += 1 return -s + 0.01*s**2 s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1) assert_equal(s, 1) assert_equal(count[0], 2) assert_armijo(s, phi) def test_wolfe_terminate(self): # wolfe1 and wolfe2 should also evaluate the function only a few # times if the trial step is already suitable def phi(s): count[0] += 1 return -s + 0.05*s**2 def derphi(s): count[0] += 1 return -1 + 0.05*2*s for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]: count = [0] r = func(phi, derphi, phi(0), None, derphi(0)) assert r[0] is not None, (r, func) assert count[0] <= 2 + 2, (count, func) assert_wolfe(r[0], phi, derphi, err_msg=str(func))
file_length: 10,896    avg_line_length: 33.593651    max_line_length: 84    extension_type: py
repo: scipy
file: scipy-main/scipy/optimize/tests/__init__.py
file_length: 0    avg_line_length: 0    max_line_length: 0    extension_type: py
repo: scipy
file: scipy-main/scipy/optimize/tests/test_lbfgsb_hessinv.py
import numpy as np
from numpy.testing import assert_allclose
import scipy.linalg
from scipy.optimize import minimize


def test_1():
    def f(x):
        return x**4, 4*x**3

    for gtol in [1e-8, 1e-12, 1e-20]:
        for maxcor in range(20, 35):
            result = minimize(fun=f, jac=True, method='L-BFGS-B', x0=20,
                              options={'gtol': gtol, 'maxcor': maxcor})

            H1 = result.hess_inv(np.array([1])).reshape(1, 1)
            H2 = result.hess_inv.todense()

            assert_allclose(H1, H2)


def test_2():
    H0 = [[3, 0], [1, 2]]

    def f(x):
        return np.dot(x, np.dot(scipy.linalg.inv(H0), x))

    result1 = minimize(fun=f, method='L-BFGS-B', x0=[10, 20])
    result2 = minimize(fun=f, method='BFGS', x0=[10, 20])

    H1 = result1.hess_inv.todense()
    H2 = np.vstack((
        result1.hess_inv(np.array([1, 0])),
        result1.hess_inv(np.array([0, 1]))))

    assert_allclose(
        result1.hess_inv(np.array([1, 0]).reshape(2, 1)).reshape(-1),
        result1.hess_inv(np.array([1, 0])))
    assert_allclose(H1, H2)
    assert_allclose(H1, result2.hess_inv, rtol=1e-2, atol=0.03)
file_length: 1,137    avg_line_length: 24.863636    max_line_length: 72    extension_type: py
repo: scipy
file: scipy-main/scipy/optimize/tests/test_least_squares.py
from itertools import product import numpy as np from numpy.linalg import norm from numpy.testing import (assert_, assert_allclose, assert_equal, suppress_warnings) from pytest import raises as assert_raises from scipy.sparse import issparse, lil_matrix from scipy.sparse.linalg import aslinearoperator from scipy.optimize import least_squares, Bounds from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES from scipy.optimize._lsq.common import EPS, make_strictly_feasible def fun_trivial(x, a=0): return (x - a)**2 + 5.0 def jac_trivial(x, a=0.0): return 2 * (x - a) def fun_2d_trivial(x): return np.array([x[0], x[1]]) def jac_2d_trivial(x): return np.identity(2) def fun_rosenbrock(x): return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) def jac_rosenbrock(x): return np.array([ [-20 * x[0], 10], [-1, 0] ]) def jac_rosenbrock_bad_dim(x): return np.array([ [-20 * x[0], 10], [-1, 0], [0.0, 0.0] ]) def fun_rosenbrock_cropped(x): return fun_rosenbrock(x)[0] def jac_rosenbrock_cropped(x): return jac_rosenbrock(x)[0] # When x is 1-D array, return is 2-D array. def fun_wrong_dimensions(x): return np.array([x, x**2, x**3]) def jac_wrong_dimensions(x, a=0.0): return np.atleast_3d(jac_trivial(x, a=a)) def fun_bvp(x): n = int(np.sqrt(x.shape[0])) u = np.zeros((n + 2, n + 2)) x = x.reshape((n, n)) u[1:-1, 1:-1] = x y = u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:] - 4 * x + x**3 return y.ravel() class BroydenTridiagonal: def __init__(self, n=100, mode='sparse'): np.random.seed(0) self.n = n self.x0 = -np.ones(n) self.lb = np.linspace(-2, -1.5, n) self.ub = np.linspace(-0.8, 0.0, n) self.lb += 0.1 * np.random.randn(n) self.ub += 0.1 * np.random.randn(n) self.x0 += 0.1 * np.random.randn(n) self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub) if mode == 'sparse': self.sparsity = lil_matrix((n, n), dtype=int) i = np.arange(n) self.sparsity[i, i] = 1 i = np.arange(1, n) self.sparsity[i, i - 1] = 1 i = np.arange(n - 1) self.sparsity[i, i + 1] = 1 self.jac = self._jac elif mode == 'operator': self.jac = lambda x: aslinearoperator(self._jac(x)) elif mode == 'dense': self.sparsity = None self.jac = lambda x: self._jac(x).toarray() else: assert_(False) def fun(self, x): f = (3 - x) * x + 1 f[1:] -= x[:-1] f[:-1] -= 2 * x[1:] return f def _jac(self, x): J = lil_matrix((self.n, self.n)) i = np.arange(self.n) J[i, i] = 3 - 2 * x i = np.arange(1, self.n) J[i, i - 1] = -1 i = np.arange(self.n - 1) J[i, i + 1] = -2 return J class ExponentialFittingProblem: """Provide data and function for exponential fitting in the form y = a + exp(b * x) + noise.""" def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1), n_points=11, random_seed=None): np.random.seed(random_seed) self.m = n_points self.n = 2 self.p0 = np.zeros(2) self.x = np.linspace(x_range[0], x_range[1], n_points) self.y = a + np.exp(b * self.x) self.y += noise * np.random.randn(self.m) outliers = np.random.randint(0, self.m, n_outliers) self.y[outliers] += 50 * noise * np.random.rand(n_outliers) self.p_opt = np.array([a, b]) def fun(self, p): return p[0] + np.exp(p[1] * self.x) - self.y def jac(self, p): J = np.empty((self.m, self.n)) J[:, 0] = 1 J[:, 1] = self.x * np.exp(p[1] * self.x) return J def cubic_soft_l1(z): rho = np.empty((3, z.size)) t = 1 + z rho[0] = 3 * (t**(1/3) - 1) rho[1] = t ** (-2/3) rho[2] = -2/3 * t**(-5/3) return rho LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1] class BaseMixin: def test_basic(self): # Test that the basic calling sequence works. 
res = least_squares(fun_trivial, 2., method=self.method) assert_allclose(res.x, 0, atol=1e-4) assert_allclose(res.fun, fun_trivial(res.x)) def test_args_kwargs(self): # Test that args and kwargs are passed correctly to the functions. a = 3.0 for jac in ['2-point', '3-point', 'cs', jac_trivial]: with suppress_warnings() as sup: sup.filter(UserWarning, "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'") res = least_squares(fun_trivial, 2.0, jac, args=(a,), method=self.method) res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a}, method=self.method) assert_allclose(res.x, a, rtol=1e-4) assert_allclose(res1.x, a, rtol=1e-4) assert_raises(TypeError, least_squares, fun_trivial, 2.0, args=(3, 4,), method=self.method) assert_raises(TypeError, least_squares, fun_trivial, 2.0, kwargs={'kaboom': 3}, method=self.method) def test_jac_options(self): for jac in ['2-point', '3-point', 'cs', jac_trivial]: with suppress_warnings() as sup: sup.filter(UserWarning, "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'") res = least_squares(fun_trivial, 2.0, jac, method=self.method) assert_allclose(res.x, 0, atol=1e-4) assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops', method=self.method) def test_nfev_options(self): for max_nfev in [None, 20]: res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev, method=self.method) assert_allclose(res.x, 0, atol=1e-4) def test_x_scale_options(self): for x_scale in [1.0, np.array([0.5]), 'jac']: res = least_squares(fun_trivial, 2.0, x_scale=x_scale) assert_allclose(res.x, 0) assert_raises(ValueError, least_squares, fun_trivial, 2.0, x_scale='auto', method=self.method) assert_raises(ValueError, least_squares, fun_trivial, 2.0, x_scale=-1.0, method=self.method) assert_raises(ValueError, least_squares, fun_trivial, 2.0, x_scale=None, method=self.method) assert_raises(ValueError, least_squares, fun_trivial, 2.0, x_scale=1.0+2.0j, method=self.method) def test_diff_step(self): # res1 and res2 should be equivalent. # res2 and res3 should be different. res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1, method=self.method) res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1, method=self.method) res3 = least_squares(fun_trivial, 2.0, diff_step=None, method=self.method) assert_allclose(res1.x, 0, atol=1e-4) assert_allclose(res2.x, 0, atol=1e-4) assert_allclose(res3.x, 0, atol=1e-4) assert_equal(res1.x, res2.x) assert_equal(res1.nfev, res2.nfev) def test_incorrect_options_usage(self): assert_raises(TypeError, least_squares, fun_trivial, 2.0, method=self.method, options={'no_such_option': 100}) assert_raises(TypeError, least_squares, fun_trivial, 2.0, method=self.method, options={'max_nfev': 100}) def test_full_result(self): # MINPACK doesn't work very well with factor=100 on this problem, # thus using low 'atol'. res = least_squares(fun_trivial, 2.0, method=self.method) assert_allclose(res.x, 0, atol=1e-4) assert_allclose(res.cost, 12.5) assert_allclose(res.fun, 5) assert_allclose(res.jac, 0, atol=1e-4) assert_allclose(res.grad, 0, atol=1e-2) assert_allclose(res.optimality, 0, atol=1e-2) assert_equal(res.active_mask, 0) if self.method == 'lm': assert_(res.nfev < 30) assert_(res.njev is None) else: assert_(res.nfev < 10) assert_(res.njev < 10) assert_(res.status > 0) assert_(res.success) def test_full_result_single_fev(self): # MINPACK checks the number of nfev after the iteration, # so it's hard to tell what he is going to compute. 
if self.method == 'lm': return res = least_squares(fun_trivial, 2.0, method=self.method, max_nfev=1) assert_equal(res.x, np.array([2])) assert_equal(res.cost, 40.5) assert_equal(res.fun, np.array([9])) assert_equal(res.jac, np.array([[4]])) assert_equal(res.grad, np.array([36])) assert_equal(res.optimality, 36) assert_equal(res.active_mask, np.array([0])) assert_equal(res.nfev, 1) assert_equal(res.njev, 1) assert_equal(res.status, 0) assert_equal(res.success, 0) def test_rosenbrock(self): x0 = [-2, 1] x_opt = [1, 1] for jac, x_scale, tr_solver in product( ['2-point', '3-point', 'cs', jac_rosenbrock], [1.0, np.array([1.0, 0.2]), 'jac'], ['exact', 'lsmr']): with suppress_warnings() as sup: sup.filter(UserWarning, "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'") res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale, tr_solver=tr_solver, method=self.method) assert_allclose(res.x, x_opt) def test_rosenbrock_cropped(self): x0 = [-2, 1] if self.method == 'lm': assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0, method='lm') else: for jac, x_scale, tr_solver in product( ['2-point', '3-point', 'cs', jac_rosenbrock_cropped], [1.0, np.array([1.0, 0.2]), 'jac'], ['exact', 'lsmr']): res = least_squares( fun_rosenbrock_cropped, x0, jac, x_scale=x_scale, tr_solver=tr_solver, method=self.method) assert_allclose(res.cost, 0, atol=1e-14) def test_fun_wrong_dimensions(self): assert_raises(ValueError, least_squares, fun_wrong_dimensions, 2.0, method=self.method) def test_jac_wrong_dimensions(self): assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac_wrong_dimensions, method=self.method) def test_fun_and_jac_inconsistent_dimensions(self): x0 = [1, 2] assert_raises(ValueError, least_squares, fun_rosenbrock, x0, jac_rosenbrock_bad_dim, method=self.method) def test_x0_multidimensional(self): x0 = np.ones(4).reshape(2, 2) assert_raises(ValueError, least_squares, fun_trivial, x0, method=self.method) def test_x0_complex_scalar(self): x0 = 2.0 + 0.0*1j assert_raises(ValueError, least_squares, fun_trivial, x0, method=self.method) def test_x0_complex_array(self): x0 = [1.0, 2.0 + 0.0*1j] assert_raises(ValueError, least_squares, fun_trivial, x0, method=self.method) def test_bvp(self): # This test was introduced with fix #5556. It turned out that # dogbox solver had a bug with trust-region radius update, which # could block its progress and create an infinite loop. And this # discrete boundary value problem is the one which triggers it. n = 10 x0 = np.ones(n**2) if self.method == 'lm': max_nfev = 5000 # To account for Jacobian estimation. else: max_nfev = 100 res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method, max_nfev=max_nfev) assert_(res.nfev < max_nfev) assert_(res.cost < 0.5) def test_error_raised_when_all_tolerances_below_eps(self): # Test that all 0 tolerances are not allowed. 
assert_raises(ValueError, least_squares, fun_trivial, 2.0, method=self.method, ftol=None, xtol=None, gtol=None) def test_convergence_with_only_one_tolerance_enabled(self): if self.method == 'lm': return # should not do test x0 = [-2, 1] x_opt = [1, 1] for ftol, xtol, gtol in [(1e-8, None, None), (None, 1e-8, None), (None, None, 1e-8)]: res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock, ftol=ftol, gtol=gtol, xtol=xtol, method=self.method) assert_allclose(res.x, x_opt) class BoundsMixin: def test_inconsistent(self): assert_raises(ValueError, least_squares, fun_trivial, 2.0, bounds=(10.0, 0.0), method=self.method) def test_infeasible(self): assert_raises(ValueError, least_squares, fun_trivial, 2.0, bounds=(3., 4), method=self.method) def test_wrong_number(self): assert_raises(ValueError, least_squares, fun_trivial, 2., bounds=(1., 2, 3), method=self.method) def test_inconsistent_shape(self): assert_raises(ValueError, least_squares, fun_trivial, 2.0, bounds=(1.0, [2.0, 3.0]), method=self.method) # 1-D array wont't be broadcasted assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0], bounds=([0.0], [3.0, 4.0]), method=self.method) def test_in_bounds(self): for jac in ['2-point', '3-point', 'cs', jac_trivial]: res = least_squares(fun_trivial, 2.0, jac=jac, bounds=(-1.0, 3.0), method=self.method) assert_allclose(res.x, 0.0, atol=1e-4) assert_equal(res.active_mask, [0]) assert_(-1 <= res.x <= 3) res = least_squares(fun_trivial, 2.0, jac=jac, bounds=(0.5, 3.0), method=self.method) assert_allclose(res.x, 0.5, atol=1e-4) assert_equal(res.active_mask, [-1]) assert_(0.5 <= res.x <= 3) def test_bounds_shape(self): def get_bounds_direct(lb, ub): return lb, ub def get_bounds_instances(lb, ub): return Bounds(lb, ub) for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]: for bounds_func in [get_bounds_direct, get_bounds_instances]: x0 = [1.0, 1.0] res = least_squares(fun_2d_trivial, x0, jac=jac) assert_allclose(res.x, [0.0, 0.0]) res = least_squares(fun_2d_trivial, x0, jac=jac, bounds=bounds_func(0.5, [2.0, 2.0]), method=self.method) assert_allclose(res.x, [0.5, 0.5]) res = least_squares(fun_2d_trivial, x0, jac=jac, bounds=bounds_func([0.3, 0.2], 3.0), method=self.method) assert_allclose(res.x, [0.3, 0.2]) res = least_squares( fun_2d_trivial, x0, jac=jac, bounds=bounds_func([-1, 0.5], [1.0, 3.0]), method=self.method) assert_allclose(res.x, [0.0, 0.5], atol=1e-5) def test_bounds_instances(self): res = least_squares(fun_trivial, 0.5, bounds=Bounds()) assert_allclose(res.x, 0.0, atol=1e-4) res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0)) assert_allclose(res.x, 1.0, atol=1e-4) res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0)) assert_allclose(res.x, 0.0, atol=1e-4) res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0)) assert_allclose(res.x, -1.0, atol=1e-4) res = least_squares(fun_2d_trivial, [0.5, 0.5], bounds=Bounds(lb=[-1.0, -1.0], ub=1.0)) assert_allclose(res.x, [0.0, 0.0], atol=1e-5) res = least_squares(fun_2d_trivial, [0.5, 0.5], bounds=Bounds(lb=[0.1, 0.1])) assert_allclose(res.x, [0.1, 0.1], atol=1e-5) def test_rosenbrock_bounds(self): x0_1 = np.array([-2.0, 1.0]) x0_2 = np.array([2.0, 2.0]) x0_3 = np.array([-2.0, 2.0]) x0_4 = np.array([0.0, 2.0]) x0_5 = np.array([-1.2, 1.0]) problems = [ (x0_1, ([-np.inf, -1.5], np.inf)), (x0_2, ([-np.inf, 1.5], np.inf)), (x0_3, ([-np.inf, 1.5], np.inf)), (x0_4, ([-np.inf, 1.5], [1.0, np.inf])), (x0_2, ([1.0, 1.5], [3.0, 3.0])), (x0_5, ([-50.0, 0.0], [0.5, 100])) ] for x0, bounds in problems: for jac, 
x_scale, tr_solver in product( ['2-point', '3-point', 'cs', jac_rosenbrock], [1.0, [1.0, 0.5], 'jac'], ['exact', 'lsmr']): res = least_squares(fun_rosenbrock, x0, jac, bounds, x_scale=x_scale, tr_solver=tr_solver, method=self.method) assert_allclose(res.optimality, 0.0, atol=1e-5) class SparseMixin: def test_exact_tr_solver(self): p = BroydenTridiagonal() assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, tr_solver='exact', method=self.method) assert_raises(ValueError, least_squares, p.fun, p.x0, tr_solver='exact', jac_sparsity=p.sparsity, method=self.method) def test_equivalence(self): sparse = BroydenTridiagonal(mode='sparse') dense = BroydenTridiagonal(mode='dense') res_sparse = least_squares( sparse.fun, sparse.x0, jac=sparse.jac, method=self.method) res_dense = least_squares( dense.fun, dense.x0, jac=sparse.jac, method=self.method) assert_equal(res_sparse.nfev, res_dense.nfev) assert_allclose(res_sparse.x, res_dense.x, atol=1e-20) assert_allclose(res_sparse.cost, 0, atol=1e-20) assert_allclose(res_dense.cost, 0, atol=1e-20) def test_tr_options(self): p = BroydenTridiagonal() res = least_squares(p.fun, p.x0, p.jac, method=self.method, tr_options={'btol': 1e-10}) assert_allclose(res.cost, 0, atol=1e-20) def test_wrong_parameters(self): p = BroydenTridiagonal() assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, tr_solver='best', method=self.method) assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac, tr_solver='lsmr', tr_options={'tol': 1e-10}) def test_solver_selection(self): sparse = BroydenTridiagonal(mode='sparse') dense = BroydenTridiagonal(mode='dense') res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac, method=self.method) res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac, method=self.method) assert_allclose(res_sparse.cost, 0, atol=1e-20) assert_allclose(res_dense.cost, 0, atol=1e-20) assert_(issparse(res_sparse.jac)) assert_(isinstance(res_dense.jac, np.ndarray)) def test_numerical_jac(self): p = BroydenTridiagonal() for jac in ['2-point', '3-point', 'cs']: res_dense = least_squares(p.fun, p.x0, jac, method=self.method) res_sparse = least_squares( p.fun, p.x0, jac,method=self.method, jac_sparsity=p.sparsity) assert_equal(res_dense.nfev, res_sparse.nfev) assert_allclose(res_dense.x, res_sparse.x, atol=1e-20) assert_allclose(res_dense.cost, 0, atol=1e-20) assert_allclose(res_sparse.cost, 0, atol=1e-20) def test_with_bounds(self): p = BroydenTridiagonal() for jac, jac_sparsity in product( [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]): res_1 = least_squares( p.fun, p.x0, jac, bounds=(p.lb, np.inf), method=self.method,jac_sparsity=jac_sparsity) res_2 = least_squares( p.fun, p.x0, jac, bounds=(-np.inf, p.ub), method=self.method, jac_sparsity=jac_sparsity) res_3 = least_squares( p.fun, p.x0, jac, bounds=(p.lb, p.ub), method=self.method, jac_sparsity=jac_sparsity) assert_allclose(res_1.optimality, 0, atol=1e-10) assert_allclose(res_2.optimality, 0, atol=1e-10) assert_allclose(res_3.optimality, 0, atol=1e-10) def test_wrong_jac_sparsity(self): p = BroydenTridiagonal() sparsity = p.sparsity[:-1] assert_raises(ValueError, least_squares, p.fun, p.x0, jac_sparsity=sparsity, method=self.method) def test_linear_operator(self): p = BroydenTridiagonal(mode='operator') res = least_squares(p.fun, p.x0, p.jac, method=self.method) assert_allclose(res.cost, 0.0, atol=1e-20) assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, method=self.method, tr_solver='exact') def test_x_scale_jac_scale(self): p = BroydenTridiagonal() res = 
least_squares(p.fun, p.x0, p.jac, method=self.method, x_scale='jac') assert_allclose(res.cost, 0.0, atol=1e-20) p = BroydenTridiagonal(mode='operator') assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, method=self.method, x_scale='jac') class LossFunctionMixin: def test_options(self): for loss in LOSSES: res = least_squares(fun_trivial, 2.0, loss=loss, method=self.method) assert_allclose(res.x, 0, atol=1e-15) assert_raises(ValueError, least_squares, fun_trivial, 2.0, loss='hinge', method=self.method) def test_fun(self): # Test that res.fun is actual residuals, and not modified by loss # function stuff. for loss in LOSSES: res = least_squares(fun_trivial, 2.0, loss=loss, method=self.method) assert_equal(res.fun, fun_trivial(res.x)) def test_grad(self): # Test that res.grad is true gradient of loss function at the # solution. Use max_nfev = 1, to avoid reaching minimum. x = np.array([2.0]) # res.x will be this. res = least_squares(fun_trivial, x, jac_trivial, loss='linear', max_nfev=1, method=self.method) assert_equal(res.grad, 2 * x * (x**2 + 5)) res = least_squares(fun_trivial, x, jac_trivial, loss='huber', max_nfev=1, method=self.method) assert_equal(res.grad, 2 * x) res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1', max_nfev=1, method=self.method) assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5) res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', max_nfev=1, method=self.method) assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)) res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', max_nfev=1, method=self.method) assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4)) res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, max_nfev=1, method=self.method) assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3)) def test_jac(self): # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation # of Hessian. This approximation is computed by doubly differentiating # the cost function and dropping the part containing second derivative # of f. For a scalar function it is computed as # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the # brackets is less than EPS it is replaced by EPS. Here, we check # against the root of H. x = 2.0 # res.x will be this. f = x**2 + 5 # res.fun will be this. res = least_squares(fun_trivial, x, jac_trivial, loss='linear', max_nfev=1, method=self.method) assert_equal(res.jac, 2 * x) # For `huber` loss the Jacobian correction is identically zero # in outlier region, in such cases it is modified to be equal EPS**0.5. res = least_squares(fun_trivial, x, jac_trivial, loss='huber', max_nfev=1, method=self.method) assert_equal(res.jac, 2 * x * EPS**0.5) # Now, let's apply `loss_scale` to turn the residual into an inlier. # The loss function becomes linear. res = least_squares(fun_trivial, x, jac_trivial, loss='huber', f_scale=10, max_nfev=1) assert_equal(res.jac, 2 * x) # 'soft_l1' always gives a positive scaling. res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1', max_nfev=1, method=self.method) assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75) # For 'cauchy' the correction term turns out to be negative, and it # replaced by EPS**0.5. res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', max_nfev=1, method=self.method) assert_allclose(res.jac, 2 * x * EPS**0.5) # Now use scaling to turn the residual to inlier. 
res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy', f_scale=10, max_nfev=1, method=self.method) fs = f / 10 assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2)) # 'arctan' gives an outlier. res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', max_nfev=1, method=self.method) assert_allclose(res.jac, 2 * x * EPS**0.5) # Turn to inlier. res = least_squares(fun_trivial, x, jac_trivial, loss='arctan', f_scale=20.0, max_nfev=1, method=self.method) fs = f / 20 assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4)) # cubic_soft_l1 will give an outlier. res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, max_nfev=1) assert_allclose(res.jac, 2 * x * EPS**0.5) # Turn to inlier. res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1, f_scale=6, max_nfev=1) fs = f / 6 assert_allclose(res.jac, 2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6)) def test_robustness(self): for noise in [0.1, 1.0]: p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0) for jac in ['2-point', '3-point', 'cs', p.jac]: res_lsq = least_squares(p.fun, p.p0, jac=jac, method=self.method) assert_allclose(res_lsq.optimality, 0, atol=1e-2) for loss in LOSSES: if loss == 'linear': continue res_robust = least_squares( p.fun, p.p0, jac=jac, loss=loss, f_scale=noise, method=self.method) assert_allclose(res_robust.optimality, 0, atol=1e-2) assert_(norm(res_robust.x - p.p_opt) < norm(res_lsq.x - p.p_opt)) class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): method = 'dogbox' class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin): method = 'trf' def test_lsmr_regularization(self): p = BroydenTridiagonal() for regularize in [True, False]: res = least_squares(p.fun, p.x0, p.jac, method='trf', tr_options={'regularize': regularize}) assert_allclose(res.cost, 0, atol=1e-20) class TestLM(BaseMixin): method = 'lm' def test_bounds_not_supported(self): assert_raises(ValueError, least_squares, fun_trivial, 2.0, bounds=(-3.0, 3.0), method='lm') def test_m_less_n_not_supported(self): x0 = [-2, 1] assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0, method='lm') def test_sparse_not_supported(self): p = BroydenTridiagonal() assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, method='lm') def test_jac_sparsity_not_supported(self): assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac_sparsity=[1], method='lm') def test_LinearOperator_not_supported(self): p = BroydenTridiagonal(mode="operator") assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac, method='lm') def test_loss(self): res = least_squares(fun_trivial, 2.0, loss='linear', method='lm') assert_allclose(res.x, 0.0, atol=1e-4) assert_raises(ValueError, least_squares, fun_trivial, 2.0, method='lm', loss='huber') def test_basic(): # test that 'method' arg is really optional res = least_squares(fun_trivial, 2.0) assert_allclose(res.x, 0, atol=1e-10) def test_small_tolerances_for_lm(): for ftol, xtol, gtol in [(None, 1e-13, 1e-13), (1e-13, None, 1e-13), (1e-13, 1e-13, None)]: assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol, ftol=ftol, gtol=gtol, method='lm') def test_fp32_gh12991(): # checks that smaller FP sizes can be used in least_squares # this is the minimum working example reported for gh12991 np.random.seed(1) x = np.linspace(0, 1, 100).astype("float32") y = np.random.random(100).astype("float32") def func(p, x): return p[0] + p[1] * x def err(p, x, y): return func(p, x) - y res = least_squares(err, [-1.0, -1.0], 
                        args=(x, y))
    # Previously the initial Jacobian calculated for this problem would be
    # all 0, so the minimization terminated immediately with nfev=1 and
    # reported success (which it should not have) while returning the
    # initial guess unchanged.
    # It terminated early because the underlying approx_derivative used a
    # step size suited to FP64 even though the working precision was FP32.
    assert res.nfev > 2
    assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
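# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a minimal,
# hedged example of the `scipy.optimize.least_squares` API that the tests
# above exercise -- Rosenbrock residuals with an analytic Jacobian, simple
# bounds, and the 'trf' solver. The helper name and starting point are
# arbitrary choices for illustration only.
def _example_least_squares_usage():
    import numpy as np
    from scipy.optimize import least_squares

    def residuals(x):
        # Rosenbrock written as a vector of residuals.
        return np.array([10 * (x[1] - x[0] ** 2), 1 - x[0]])

    def jacobian(x):
        return np.array([[-20 * x[0], 10.0], [-1.0, 0.0]])

    res = least_squares(residuals, [-2.0, 1.0], jac=jacobian,
                        bounds=([-np.inf, -1.5], np.inf), method='trf')
    # `res.x` holds the solution, `res.cost` equals 0.5 * sum(res.fun**2),
    # and `res.optimality` is the first-order optimality measure checked in
    # the tests above.
    return res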
file_length: 31,773
avg_line_length: 38.130542
max_line_length: 96
extension_type: py

repo: scipy
file: scipy-main/scipy/optimize/tests/test_minimize_constrained.py
import numpy as np import pytest from scipy.linalg import block_diag from scipy.sparse import csc_matrix from numpy.testing import (TestCase, assert_array_almost_equal, assert_array_less, assert_, assert_allclose, suppress_warnings) from scipy.optimize import (NonlinearConstraint, LinearConstraint, Bounds, minimize, BFGS, SR1) class Maratos: """Problem 15.4 from Nocedal and Wright The following optimization problem: minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] Subject to: x[0]**2 + x[1]**2 - 1 = 0 """ def __init__(self, degrees=60, constr_jac=None, constr_hess=None): rads = degrees/180*np.pi self.x0 = [np.cos(rads), np.sin(rads)] self.x_opt = np.array([1.0, 0.0]) self.constr_jac = constr_jac self.constr_hess = constr_hess self.bounds = None def fun(self, x): return 2*(x[0]**2 + x[1]**2 - 1) - x[0] def grad(self, x): return np.array([4*x[0]-1, 4*x[1]]) def hess(self, x): return 4*np.eye(2) @property def constr(self): def fun(x): return x[0]**2 + x[1]**2 if self.constr_jac is None: def jac(x): return [[2*x[0], 2*x[1]]] else: jac = self.constr_jac if self.constr_hess is None: def hess(x, v): return 2*v[0]*np.eye(2) else: hess = self.constr_hess return NonlinearConstraint(fun, 1, 1, jac, hess) class MaratosTestArgs: """Problem 15.4 from Nocedal and Wright The following optimization problem: minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] Subject to: x[0]**2 + x[1]**2 - 1 = 0 """ def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None): rads = degrees/180*np.pi self.x0 = [np.cos(rads), np.sin(rads)] self.x_opt = np.array([1.0, 0.0]) self.constr_jac = constr_jac self.constr_hess = constr_hess self.a = a self.b = b self.bounds = None def _test_args(self, a, b): if self.a != a or self.b != b: raise ValueError() def fun(self, x, a, b): self._test_args(a, b) return 2*(x[0]**2 + x[1]**2 - 1) - x[0] def grad(self, x, a, b): self._test_args(a, b) return np.array([4*x[0]-1, 4*x[1]]) def hess(self, x, a, b): self._test_args(a, b) return 4*np.eye(2) @property def constr(self): def fun(x): return x[0]**2 + x[1]**2 if self.constr_jac is None: def jac(x): return [[4*x[0], 4*x[1]]] else: jac = self.constr_jac if self.constr_hess is None: def hess(x, v): return 2*v[0]*np.eye(2) else: hess = self.constr_hess return NonlinearConstraint(fun, 1, 1, jac, hess) class MaratosGradInFunc: """Problem 15.4 from Nocedal and Wright The following optimization problem: minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0] Subject to: x[0]**2 + x[1]**2 - 1 = 0 """ def __init__(self, degrees=60, constr_jac=None, constr_hess=None): rads = degrees/180*np.pi self.x0 = [np.cos(rads), np.sin(rads)] self.x_opt = np.array([1.0, 0.0]) self.constr_jac = constr_jac self.constr_hess = constr_hess self.bounds = None def fun(self, x): return (2*(x[0]**2 + x[1]**2 - 1) - x[0], np.array([4*x[0]-1, 4*x[1]])) @property def grad(self): return True def hess(self, x): return 4*np.eye(2) @property def constr(self): def fun(x): return x[0]**2 + x[1]**2 if self.constr_jac is None: def jac(x): return [[4*x[0], 4*x[1]]] else: jac = self.constr_jac if self.constr_hess is None: def hess(x, v): return 2*v[0]*np.eye(2) else: hess = self.constr_hess return NonlinearConstraint(fun, 1, 1, jac, hess) class HyperbolicIneq: """Problem 15.1 from Nocedal and Wright The following optimization problem: minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 Subject to: 1/(x[0] + 1) - x[1] >= 1/4 x[0] >= 0 x[1] >= 0 """ def __init__(self, constr_jac=None, constr_hess=None): self.x0 = [0, 0] self.x_opt = [1.952823, 0.088659] self.constr_jac = constr_jac self.constr_hess = 
constr_hess self.bounds = Bounds(0, np.inf) def fun(self, x): return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2 def grad(self, x): return [x[0] - 2, x[1] - 1/2] def hess(self, x): return np.eye(2) @property def constr(self): def fun(x): return 1/(x[0] + 1) - x[1] if self.constr_jac is None: def jac(x): return [[-1/(x[0] + 1)**2, -1]] else: jac = self.constr_jac if self.constr_hess is None: def hess(x, v): return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0], [0, 0]]) else: hess = self.constr_hess return NonlinearConstraint(fun, 0.25, np.inf, jac, hess) class Rosenbrock: """Rosenbrock function. The following optimization problem: minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0) """ def __init__(self, n=2, random_state=0): rng = np.random.RandomState(random_state) self.x0 = rng.uniform(-1, 1, n) self.x_opt = np.ones(n) self.bounds = None def fun(self, x): x = np.asarray(x) r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0) return r def grad(self, x): x = np.asarray(x) xm = x[1:-1] xm_m1 = x[:-2] xm_p1 = x[2:] der = np.zeros_like(x) der[1:-1] = (200 * (xm - xm_m1**2) - 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm)) der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0]) der[-1] = 200 * (x[-1] - x[-2]**2) return der def hess(self, x): x = np.atleast_1d(x) H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1) diagonal = np.zeros(len(x), dtype=x.dtype) diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2 diagonal[-1] = 200 diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:] H = H + np.diag(diagonal) return H @property def constr(self): return () class IneqRosenbrock(Rosenbrock): """Rosenbrock subject to inequality constraints. The following optimization problem: minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) subject to: x[0] + 2 x[1] <= 1 Taken from matlab ``fmincon`` documentation. """ def __init__(self, random_state=0): Rosenbrock.__init__(self, 2, random_state) self.x0 = [-1, -0.5] self.x_opt = [0.5022, 0.2489] self.bounds = None @property def constr(self): A = [[1, 2]] b = 1 return LinearConstraint(A, -np.inf, b) class BoundedRosenbrock(Rosenbrock): """Rosenbrock subject to inequality constraints. The following optimization problem: minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) subject to: -2 <= x[0] <= 0 0 <= x[1] <= 2 Taken from matlab ``fmincon`` documentation. """ def __init__(self, random_state=0): Rosenbrock.__init__(self, 2, random_state) self.x0 = [-0.2, 0.2] self.x_opt = None self.bounds = Bounds([-2, 0], [0, 2]) class EqIneqRosenbrock(Rosenbrock): """Rosenbrock subject to equality and inequality constraints. The following optimization problem: minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2) subject to: x[0] + 2 x[1] <= 1 2 x[0] + x[1] = 1 Taken from matlab ``fimincon`` documentation. """ def __init__(self, random_state=0): Rosenbrock.__init__(self, 2, random_state) self.x0 = [-1, -0.5] self.x_opt = [0.41494, 0.17011] self.bounds = None @property def constr(self): A_ineq = [[1, 2]] b_ineq = 1 A_eq = [[2, 1]] b_eq = 1 return (LinearConstraint(A_ineq, -np.inf, b_ineq), LinearConstraint(A_eq, b_eq, b_eq)) class Elec: """Distribution of electrons on a sphere. Problem no 2 from COPS collection [2]_. Find the equilibrium state distribution (of minimal potential) of the electrons positioned on a conducting sphere. References ---------- .. [1] E. D. Dolan, J. J. Mor\'{e}, and T. S. Munson, "Benchmarking optimization software with COPS 3.0.", Argonne National Lab., Argonne, IL (US), 2004. 
""" def __init__(self, n_electrons=200, random_state=0, constr_jac=None, constr_hess=None): self.n_electrons = n_electrons self.rng = np.random.RandomState(random_state) # Initial Guess phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons) theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons) x = np.cos(theta) * np.cos(phi) y = np.cos(theta) * np.sin(phi) z = np.sin(theta) self.x0 = np.hstack((x, y, z)) self.x_opt = None self.constr_jac = constr_jac self.constr_hess = constr_hess self.bounds = None def _get_cordinates(self, x): x_coord = x[:self.n_electrons] y_coord = x[self.n_electrons:2 * self.n_electrons] z_coord = x[2 * self.n_electrons:] return x_coord, y_coord, z_coord def _compute_coordinate_deltas(self, x): x_coord, y_coord, z_coord = self._get_cordinates(x) dx = x_coord[:, None] - x_coord dy = y_coord[:, None] - y_coord dz = z_coord[:, None] - z_coord return dx, dy, dz def fun(self, x): dx, dy, dz = self._compute_coordinate_deltas(x) with np.errstate(divide='ignore'): dm1 = (dx**2 + dy**2 + dz**2) ** -0.5 dm1[np.diag_indices_from(dm1)] = 0 return 0.5 * np.sum(dm1) def grad(self, x): dx, dy, dz = self._compute_coordinate_deltas(x) with np.errstate(divide='ignore'): dm3 = (dx**2 + dy**2 + dz**2) ** -1.5 dm3[np.diag_indices_from(dm3)] = 0 grad_x = -np.sum(dx * dm3, axis=1) grad_y = -np.sum(dy * dm3, axis=1) grad_z = -np.sum(dz * dm3, axis=1) return np.hstack((grad_x, grad_y, grad_z)) def hess(self, x): dx, dy, dz = self._compute_coordinate_deltas(x) d = (dx**2 + dy**2 + dz**2) ** 0.5 with np.errstate(divide='ignore'): dm3 = d ** -3 dm5 = d ** -5 i = np.arange(self.n_electrons) dm3[i, i] = 0 dm5[i, i] = 0 Hxx = dm3 - 3 * dx**2 * dm5 Hxx[i, i] = -np.sum(Hxx, axis=1) Hxy = -3 * dx * dy * dm5 Hxy[i, i] = -np.sum(Hxy, axis=1) Hxz = -3 * dx * dz * dm5 Hxz[i, i] = -np.sum(Hxz, axis=1) Hyy = dm3 - 3 * dy**2 * dm5 Hyy[i, i] = -np.sum(Hyy, axis=1) Hyz = -3 * dy * dz * dm5 Hyz[i, i] = -np.sum(Hyz, axis=1) Hzz = dm3 - 3 * dz**2 * dm5 Hzz[i, i] = -np.sum(Hzz, axis=1) H = np.vstack(( np.hstack((Hxx, Hxy, Hxz)), np.hstack((Hxy, Hyy, Hyz)), np.hstack((Hxz, Hyz, Hzz)) )) return H @property def constr(self): def fun(x): x_coord, y_coord, z_coord = self._get_cordinates(x) return x_coord**2 + y_coord**2 + z_coord**2 - 1 if self.constr_jac is None: def jac(x): x_coord, y_coord, z_coord = self._get_cordinates(x) Jx = 2 * np.diag(x_coord) Jy = 2 * np.diag(y_coord) Jz = 2 * np.diag(z_coord) return csc_matrix(np.hstack((Jx, Jy, Jz))) else: jac = self.constr_jac if self.constr_hess is None: def hess(x, v): D = 2 * np.diag(v) return block_diag(D, D, D) else: hess = self.constr_hess return NonlinearConstraint(fun, -np.inf, 0, jac, hess) class TestTrustRegionConstr(TestCase): @pytest.mark.slow def test_list_of_problems(self): list_of_problems = [Maratos(), Maratos(constr_hess='2-point'), Maratos(constr_hess=SR1()), Maratos(constr_jac='2-point', constr_hess=SR1()), MaratosGradInFunc(), HyperbolicIneq(), HyperbolicIneq(constr_hess='3-point'), HyperbolicIneq(constr_hess=BFGS()), HyperbolicIneq(constr_jac='3-point', constr_hess=BFGS()), Rosenbrock(), IneqRosenbrock(), EqIneqRosenbrock(), BoundedRosenbrock(), Elec(n_electrons=2), Elec(n_electrons=2, constr_hess='2-point'), Elec(n_electrons=2, constr_hess=SR1()), Elec(n_electrons=2, constr_jac='3-point', constr_hess=SR1())] for prob in list_of_problems: for grad in (prob.grad, '3-point', False): for hess in (prob.hess, '3-point', SR1(), BFGS(exception_strategy='damp_update'), BFGS(exception_strategy='skip_update')): # Remove exceptions if grad in ('2-point', 
'3-point', 'cs', False) and \ hess in ('2-point', '3-point', 'cs'): continue if prob.grad is True and grad in ('3-point', False): continue with suppress_warnings() as sup: sup.filter(UserWarning, "delta_grad == 0.0") result = minimize(prob.fun, prob.x0, method='trust-constr', jac=grad, hess=hess, bounds=prob.bounds, constraints=prob.constr) if prob.x_opt is not None: assert_array_almost_equal(result.x, prob.x_opt, decimal=5) # gtol if result.status == 1: assert_array_less(result.optimality, 1e-8) # xtol if result.status == 2: assert_array_less(result.tr_radius, 1e-8) if result.method == "tr_interior_point": assert_array_less(result.barrier_parameter, 1e-8) # max iter if result.status in (0, 3): raise RuntimeError("Invalid termination condition.") def test_default_jac_and_hess(self): def fun(x): return (x - 1) ** 2 bounds = [(-2, 2)] res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr') assert_array_almost_equal(res.x, 1, decimal=5) def test_default_hess(self): def fun(x): return (x - 1) ** 2 bounds = [(-2, 2)] res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr', jac='2-point') assert_array_almost_equal(res.x, 1, decimal=5) def test_no_constraints(self): prob = Rosenbrock() result = minimize(prob.fun, prob.x0, method='trust-constr', jac=prob.grad, hess=prob.hess) result1 = minimize(prob.fun, prob.x0, method='L-BFGS-B', jac='2-point') result2 = minimize(prob.fun, prob.x0, method='L-BFGS-B', jac='3-point') assert_array_almost_equal(result.x, prob.x_opt, decimal=5) assert_array_almost_equal(result1.x, prob.x_opt, decimal=5) assert_array_almost_equal(result2.x, prob.x_opt, decimal=5) def test_hessp(self): prob = Maratos() def hessp(x, p): H = prob.hess(x) return H.dot(p) result = minimize(prob.fun, prob.x0, method='trust-constr', jac=prob.grad, hessp=hessp, bounds=prob.bounds, constraints=prob.constr) if prob.x_opt is not None: assert_array_almost_equal(result.x, prob.x_opt, decimal=2) # gtol if result.status == 1: assert_array_less(result.optimality, 1e-8) # xtol if result.status == 2: assert_array_less(result.tr_radius, 1e-8) if result.method == "tr_interior_point": assert_array_less(result.barrier_parameter, 1e-8) # max iter if result.status in (0, 3): raise RuntimeError("Invalid termination condition.") def test_args(self): prob = MaratosTestArgs("a", 234) result = minimize(prob.fun, prob.x0, ("a", 234), method='trust-constr', jac=prob.grad, hess=prob.hess, bounds=prob.bounds, constraints=prob.constr) if prob.x_opt is not None: assert_array_almost_equal(result.x, prob.x_opt, decimal=2) # gtol if result.status == 1: assert_array_less(result.optimality, 1e-8) # xtol if result.status == 2: assert_array_less(result.tr_radius, 1e-8) if result.method == "tr_interior_point": assert_array_less(result.barrier_parameter, 1e-8) # max iter if result.status in (0, 3): raise RuntimeError("Invalid termination condition.") def test_raise_exception(self): prob = Maratos() message = "Whenever the gradient is estimated via finite-differences" with pytest.raises(ValueError, match=message): minimize(prob.fun, prob.x0, method='trust-constr', jac='2-point', hess='2-point', constraints=prob.constr) def test_issue_9044(self): # https://github.com/scipy/scipy/issues/9044 # Test the returned `OptimizeResult` contains keys consistent with # other solvers. 
def callback(x, info): assert_('nit' in info) assert_('niter' in info) result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x, hess=lambda x: 2, callback=callback, method='trust-constr') assert_(result.get('success')) assert_(result.get('nit', -1) == 1) # Also check existence of the 'niter' attribute, for backward # compatibility assert_(result.get('niter', -1) == 1) class TestEmptyConstraint(TestCase): """ Here we minimize x^2+y^2 subject to x^2-y^2>1. The actual minimum is at (0, 0) which fails the constraint. Therefore we will find a minimum on the boundary at (+/-1, 0). When minimizing on the boundary, optimize uses a set of constraints that removes the constraint that sets that boundary. In our case, there's only one constraint, so the result is an empty constraint. This tests that the empty constraint works. """ def test_empty_constraint(self): def function(x): return x[0]**2 + x[1]**2 def functionjacobian(x): return np.array([2.*x[0], 2.*x[1]]) def functionhvp(x, v): return 2.*v def constraint(x): return np.array([x[0]**2 - x[1]**2]) def constraintjacobian(x): return np.array([[2*x[0], -2*x[1]]]) def constraintlcoh(x, v): return np.array([[2., 0.], [0., -2.]]) * v[0] constraint = NonlinearConstraint(constraint, 1., np.inf, constraintjacobian, constraintlcoh) startpoint = [1., 2.] bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf]) result = minimize( function, startpoint, method='trust-constr', jac=functionjacobian, hessp=functionhvp, constraints=[constraint], bounds=bounds, ) assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=4) def test_bug_11886(): def opt(x): return x[0]**2+x[1]**2 with np.testing.suppress_warnings() as sup: sup.filter(PendingDeprecationWarning) A = np.matrix(np.diag([1, 1])) lin_cons = LinearConstraint(A, -1, np.inf) minimize(opt, 2*[1], constraints = lin_cons) # just checking that there are no errors # Remove xfail when gh-11649 is resolved @pytest.mark.xfail(reason="Known bug in trust-constr; see gh-11649.", strict=True) def test_gh11649(): bnds = Bounds(lb=[-1, -1], ub=[1, 1], keep_feasible=True) def assert_inbounds(x): assert np.all(x >= bnds.lb) assert np.all(x <= bnds.ub) def obj(x): assert_inbounds(x) return np.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1) def nce(x): assert_inbounds(x) return x[0]**2 + x[1] def nci(x): assert_inbounds(x) return x[0]*x[1] x0 = np.array((0.99, -0.99)) nlcs = [NonlinearConstraint(nci, -10, np.inf), NonlinearConstraint(nce, 1, 1)] res = minimize(fun=obj, x0=x0, method='trust-constr', bounds=bnds, constraints=nlcs) assert res.success assert_inbounds(res.x) assert nlcs[0].lb < nlcs[0].fun(res.x) < nlcs[0].ub assert_allclose(nce(res.x), nlcs[1].ub) ref = minimize(fun=obj, x0=x0, method='slsqp', bounds=bnds, constraints=nlcs) assert_allclose(res.fun, ref.fun) class TestBoundedNelderMead: @pytest.mark.parametrize('bounds, x_opt', [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt), (Bounds(-np.inf, -0.8), [-0.8, -0.8]), (Bounds(3.0, np.inf), [3.0, 9.0]), (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]), ]) def test_rosen_brock_with_bounds(self, bounds, x_opt): prob = Rosenbrock() with suppress_warnings() as sup: sup.filter(UserWarning, "Initial guess is not within " "the specified bounds") result = minimize(prob.fun, [-10, -10], method='Nelder-Mead', bounds=bounds) assert np.less_equal(bounds.lb, result.x).all() assert np.less_equal(result.x, bounds.ub).all() assert np.allclose(prob.fun(result.x), result.fun) assert np.allclose(result.x, x_opt, atol=1.e-3) def test_equal_all_bounds(self): prob = 
Rosenbrock() bounds = Bounds([4.0, 5.0], [4.0, 5.0]) with suppress_warnings() as sup: sup.filter(UserWarning, "Initial guess is not within " "the specified bounds") result = minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds) assert np.allclose(result.x, [4.0, 5.0]) def test_equal_one_bounds(self): prob = Rosenbrock() bounds = Bounds([4.0, 5.0], [4.0, 20.0]) with suppress_warnings() as sup: sup.filter(UserWarning, "Initial guess is not within " "the specified bounds") result = minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds) assert np.allclose(result.x, [4.0, 16.0]) def test_invalid_bounds(self): prob = Rosenbrock() message = 'An upper bound is less than the corresponding lower bound.' with pytest.raises(ValueError, match=message): bounds = Bounds([-np.inf, 1.0], [4.0, -5.0]) minimize(prob.fun, [-10, 3], method='Nelder-Mead', bounds=bounds) @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, " "see gh-13846") def test_outside_bounds_warning(self): prob = Rosenbrock() message = "Initial guess is not within the specified bounds" with pytest.warns(UserWarning, match=message): bounds = Bounds([-np.inf, 1.0], [4.0, 5.0]) minimize(prob.fun, [-10, 8], method='Nelder-Mead', bounds=bounds)
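# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a minimal,
# hedged example of `minimize(..., method='trust-constr')` with the
# nonlinear equality constraint from the Maratos problem defined above.
# The helper name and starting point are arbitrary choices.
def _example_trust_constr_usage():
    import numpy as np
    from scipy.optimize import minimize, NonlinearConstraint

    def fun(x):
        return 2 * (x[0] ** 2 + x[1] ** 2 - 1) - x[0]

    def grad(x):
        return np.array([4 * x[0] - 1, 4 * x[1]])

    # Equality constraint x[0]**2 + x[1]**2 == 1, expressed as lb == ub == 1.
    circle = NonlinearConstraint(lambda x: x[0] ** 2 + x[1] ** 2, 1, 1,
                                 jac=lambda x: [[2 * x[0], 2 * x[1]]])

    res = minimize(fun, [np.cos(1.0), np.sin(1.0)], jac=grad,
                   method='trust-constr', constraints=[circle])
    # For this problem the known optimum is approximately (1, 0).
    return res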
file_length: 25,696
avg_line_length: 31.902689
max_line_length: 100
extension_type: py

repo: scipy
file: scipy-main/scipy/optimize/tests/test_lbfgsb_setulb.py
import numpy as np from scipy.optimize import _lbfgsb def objfun(x): """simplified objective func to test lbfgsb bound violation""" x0 = [0.8750000000000278, 0.7500000000000153, 0.9499999999999722, 0.8214285714285992, 0.6363636363636085] x1 = [1.0, 0.0, 1.0, 0.0, 0.0] x2 = [1.0, 0.0, 0.9889733043149325, 0.0, 0.026353554421041155] x3 = [1.0, 0.0, 0.9889917442915558, 0.0, 0.020341986743231205] f0 = 5163.647901211178 f1 = 5149.8181642072905 f2 = 5149.379332309634 f3 = 5149.374490771297 g0 = np.array([-0.5934820547965749, 1.6251549718258351, -71.99168459202559, 5.346636965797545, 37.10732723092604]) g1 = np.array([-0.43295349282641515, 1.008607936794592, 18.223666726602975, 31.927010036981997, -19.667512518739386]) g2 = np.array([-0.4699874455100256, 0.9466285353668347, -0.016874360242016825, 48.44999161133457, 5.819631620590712]) g3 = np.array([-0.46970678696829116, 0.9612719312174818, 0.006129809488833699, 48.43557729419473, 6.005481418498221]) if np.allclose(x, x0): f = f0 g = g0 elif np.allclose(x, x1): f = f1 g = g1 elif np.allclose(x, x2): f = f2 g = g2 elif np.allclose(x, x3): f = f3 g = g3 else: raise ValueError( 'Simplified objective function not defined ' 'at requested point') return (np.copy(f), np.copy(g)) def test_setulb_floatround(): """test if setulb() violates bounds checks for violation due to floating point rounding error """ n = 5 m = 10 factr = 1e7 pgtol = 1e-5 maxls = 20 iprint = -1 nbd = np.full((n,), 2) low_bnd = np.zeros(n, np.float64) upper_bnd = np.ones(n, np.float64) x0 = np.array( [0.8750000000000278, 0.7500000000000153, 0.9499999999999722, 0.8214285714285992, 0.6363636363636085]) x = np.copy(x0) f = np.array(0.0, np.float64) g = np.zeros(n, np.float64) fortran_int = _lbfgsb.types.intvar.dtype wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64) iwa = np.zeros(3*n, fortran_int) task = np.zeros(1, 'S60') csave = np.zeros(1, 'S60') lsave = np.zeros(4, fortran_int) isave = np.zeros(44, fortran_int) dsave = np.zeros(29, np.float64) task[:] = b'START' for n_iter in range(7): # 7 steps required to reproduce error f, g = objfun(x) _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr, pgtol, wa, iwa, task, iprint, csave, lsave, isave, dsave, maxls) assert (x <= upper_bnd).all() and (x >= low_bnd).all(), ( "_lbfgsb.setulb() stepped to a point outside of the bounds")
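# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the test above
# drives the low-level `_lbfgsb.setulb` routine directly. For comparison,
# this is a minimal, hedged sketch of the public entry point,
# `scipy.optimize.minimize(..., method='L-BFGS-B')`, on an arbitrary
# quadratic with the same kind of [0, 1] box bounds. Names are arbitrary.
def _example_lbfgsb_usage():
    import numpy as np
    from scipy.optimize import minimize

    def fun(x):
        return float(np.sum((x - 0.75) ** 2))

    def grad(x):
        return 2 * (x - 0.75)

    bounds = [(0.0, 1.0)] * 5  # one (low, high) pair per variable
    res = minimize(fun, np.full(5, 0.2), jac=grad, method='L-BFGS-B',
                   bounds=bounds)
    # L-BFGS-B keeps its iterates inside the box, so res.x should satisfy
    # the bounds element-wise.
    return res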
file_length: 3,172
avg_line_length: 26.119658
max_line_length: 72
extension_type: py

repo: scipy
file: scipy-main/scipy/optimize/tests/test_zeros.py
import pytest from functools import lru_cache from numpy.testing import (assert_warns, assert_, assert_allclose, assert_equal, assert_array_equal, assert_array_less, suppress_warnings) import numpy as np from numpy import finfo, power, nan, isclose, sqrt, exp, sin, cos from scipy import stats, optimize from scipy.optimize import (_zeros_py as zeros, newton, root_scalar, OptimizeResult) from scipy._lib._util import getfullargspec_no_self as _getfullargspec # Import testing parameters from scipy.optimize._tstutils import get_tests, functions as tstutils_functions TOL = 4*np.finfo(float).eps # tolerance _FLOAT_EPS = finfo(float).eps bracket_methods = [zeros.bisect, zeros.ridder, zeros.brentq, zeros.brenth, zeros.toms748] gradient_methods = [zeros.newton] all_methods = bracket_methods + gradient_methods # noqa # A few test functions used frequently: # # A simple quadratic, (x-1)^2 - 1 def f1(x): return x ** 2 - 2 * x - 1 def f1_1(x): return 2 * x - 2 def f1_2(x): return 2.0 + 0 * x def f1_and_p_and_pp(x): return f1(x), f1_1(x), f1_2(x) # Simple transcendental function def f2(x): return exp(x) - cos(x) def f2_1(x): return exp(x) + sin(x) def f2_2(x): return exp(x) + cos(x) # lru cached function @lru_cache def f_lrucached(x): return x class TestScalarRootFinders: # Basic tests for all scalar root finders xtol = 4 * np.finfo(float).eps rtol = 4 * np.finfo(float).eps def _run_one_test(self, tc, method, sig_args_keys=None, sig_kwargs_keys=None, **kwargs): method_args = [] for k in sig_args_keys or []: if k not in tc: # If a,b not present use x0, x1. Similarly for f and func k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k) method_args.append(tc[k]) method_kwargs = dict(**kwargs) method_kwargs.update({'full_output': True, 'disp': False}) for k in sig_kwargs_keys or []: method_kwargs[k] = tc[k] root = tc.get('root') func_args = tc.get('args', ()) try: r, rr = method(*method_args, args=func_args, **method_kwargs) return root, rr, tc except Exception: return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR), tc def run_tests(self, tests, method, name, known_fail=None, **kwargs): r"""Run test-cases using the specified method and the supplied signature. Extract the arguments for the method call from the test case dictionary using the supplied keys for the method's signature.""" # The methods have one of two base signatures: # (f, a, b, **kwargs) # newton # (func, x0, **kwargs) # bisect/brentq/... sig = _getfullargspec(method) # FullArgSpec with args, varargs, varkw, defaults, ... assert_(not sig.kwonlyargs) nDefaults = len(sig.defaults) nRequired = len(sig.args) - nDefaults sig_args_keys = sig.args[:nRequired] sig_kwargs_keys = [] if name in ['secant', 'newton', 'halley']: if name in ['newton', 'halley']: sig_kwargs_keys.append('fprime') if name in ['halley']: sig_kwargs_keys.append('fprime2') kwargs['tol'] = self.xtol else: kwargs['xtol'] = self.xtol kwargs['rtol'] = self.rtol results = [list(self._run_one_test( tc, method, sig_args_keys=sig_args_keys, sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests] # results= [[true root, full output, tc], ...] 
known_fail = known_fail or [] notcvgd = [elt for elt in results if not elt[1].converged] notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail] notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd] assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []]) # The usable xtol and rtol depend on the test tols = {'xtol': self.xtol, 'rtol': self.rtol} tols.update(**kwargs) rtol = tols['rtol'] atol = tols.get('tol', tols['xtol']) cvgd = [elt for elt in results if elt[1].converged] approx = [elt[1].root for elt in cvgd] correct = [elt[0] for elt in cvgd] # See if the root matches the reference value notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if not isclose(a, c, rtol=rtol, atol=atol) and elt[-1]['ID'] not in known_fail] # If not, evaluate the function and see if is 0 at the purported root fvs = [tc['f'](aroot, *tc.get('args', tuple())) for aroot, c, fullout, tc in notclose] notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0] assert_equal([notclose, len(notclose)], [[], 0]) def run_collection(self, collection, method, name, smoothness=None, known_fail=None, **kwargs): r"""Run a collection of tests using the specified method. The name is used to determine some optional arguments.""" tests = get_tests(collection, smoothness=smoothness) self.run_tests(tests, method, name, known_fail=known_fail, **kwargs) class TestBracketMethods(TestScalarRootFinders): @pytest.mark.parametrize('method', bracket_methods) @pytest.mark.parametrize('function', tstutils_functions) def test_basic_root_scalar(self, method, function): # Tests bracketing root finders called via `root_scalar` on a small # set of simple problems, each of which has a root at `x=1`. Checks for # converged status and that the root was found. a, b = .5, sqrt(3) r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a, xtol=self.xtol, rtol=self.rtol) assert r.converged assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol) @pytest.mark.parametrize('method', bracket_methods) @pytest.mark.parametrize('function', tstutils_functions) def test_basic_individual(self, method, function): # Tests individual bracketing root finders on a small set of simple # problems, each of which has a root at `x=1`. Checks for converged # status and that the root was found. 
a, b = .5, sqrt(3) root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol, full_output=True) assert r.converged assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol) @pytest.mark.parametrize('method', bracket_methods) def test_aps_collection(self, method): self.run_collection('aps', method, method.__name__, smoothness=1) @pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder, zeros.toms748]) def test_chandrupatla_collection(self, method): known_fail = {'fun7.4'} if method == zeros.ridder else {} self.run_collection('chandrupatla', method, method.__name__, known_fail=known_fail) @pytest.mark.parametrize('method', bracket_methods) def test_lru_cached_individual(self, method): # check that https://github.com/scipy/scipy/issues/10846 is fixed # (`root_scalar` failed when passed a function that was `@lru_cache`d) a, b = -1, 1 root, r = method(f_lrucached, a, b, full_output=True) assert r.converged assert_allclose(root, 0) class TestChandrupatla(TestScalarRootFinders): def f(self, q, p): return stats.norm.cdf(q) - p @pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)]) def test_basic(self, p): # Invert distribution CDF and compare against distrtibution `ppf` res = zeros._chandrupatla(self.f, -5, 5, args=(p,)) ref = stats.norm().ppf(p) np.testing.assert_allclose(res.x, ref) assert res.x.shape == ref.shape @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) def test_vectorization(self, shape): # Test for correct functionality, output shapes, and dtypes for various # input shapes. p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6 args = (p,) @np.vectorize def chandrupatla_single(p): return zeros._chandrupatla(self.f, -5, 5, args=(p,)) def f(*args, **kwargs): f.f_evals += 1 return self.f(*args, **kwargs) f.f_evals = 0 res = zeros._chandrupatla(f, -5, 5, args=args) refs = chandrupatla_single(p).ravel() ref_x = [ref.x for ref in refs] assert_allclose(res.x.ravel(), ref_x) assert_equal(res.x.shape, shape) ref_fun = [ref.fun for ref in refs] assert_allclose(res.fun.ravel(), ref_fun) assert_equal(res.fun.shape, shape) assert_equal(res.fun, self.f(res.x, *args)) ref_success = [ref.success for ref in refs] assert_equal(res.success.ravel(), ref_success) assert_equal(res.success.shape, shape) assert np.issubdtype(res.success.dtype, np.bool_) ref_flag = [ref.status for ref in refs] assert_equal(res.status.ravel(), ref_flag) assert_equal(res.status.shape, shape) assert np.issubdtype(res.status.dtype, np.integer) ref_nfev = [ref.nfev for ref in refs] assert_equal(res.nfev.ravel(), ref_nfev) assert_equal(np.max(res.nfev), f.f_evals) assert_equal(res.nfev.shape, res.fun.shape) assert np.issubdtype(res.nfev.dtype, np.integer) ref_nit = [ref.nit for ref in refs] assert_equal(res.nit.ravel(), ref_nit) assert_equal(np.max(res.nit), f.f_evals-2) assert_equal(res.nit.shape, res.fun.shape) assert np.issubdtype(res.nit.dtype, np.integer) ref_xl = [ref.xl for ref in refs] assert_allclose(res.xl.ravel(), ref_xl) assert_equal(res.xl.shape, shape) ref_xr = [ref.xr for ref in refs] assert_allclose(res.xr.ravel(), ref_xr) assert_equal(res.xr.shape, shape) assert_array_less(res.xl, res.xr) finite = np.isfinite(res.x) assert np.all((res.x[finite] == res.xl[finite]) | (res.x[finite] == res.xr[finite])) ref_fl = [ref.fl for ref in refs] assert_allclose(res.fl.ravel(), ref_fl) assert_equal(res.fl.shape, shape) assert_allclose(res.fl, self.f(res.xl, *args)) ref_fr = [ref.fr for ref in refs] assert_allclose(res.fr.ravel(), ref_fr) assert_equal(res.fr.shape, shape) 
assert_allclose(res.fr, self.f(res.xr, *args)) assert np.all(np.abs(res.fun[finite]) == np.minimum(np.abs(res.fl[finite]), np.abs(res.fr[finite]))) def test_flags(self): # Test cases that should produce different status flags; show that all # can be produced simultaneously. def f(xs, js): funcs = [lambda x: x - 2.5, lambda x: x - 10, lambda x: (x - 0.1)**3, lambda x: np.nan] return [funcs[j](x) for x, j in zip(xs, js)] args = (np.arange(4, dtype=np.int64),) res = zeros._chandrupatla(f, [0]*4, [np.pi]*4, args=args, maxiter=2) ref_flags = np.array([zeros._ECONVERGED, zeros._ESIGNERR, zeros._ECONVERR, zeros._EVALUEERR]) assert_equal(res.status, ref_flags) def test_convergence(self): # Test that the convergence tolerances behave as expected rng = np.random.default_rng(2585255913088665241) p = rng.random(size=3) bracket = (-5, 5) args = (p,) kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0) kwargs = kwargs0.copy() kwargs['xatol'] = 1e-3 res1 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(res1.xr - res1.xl, 1e-3) kwargs['xatol'] = 1e-6 res2 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(res2.xr - res2.xl, 1e-6) assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl) kwargs = kwargs0.copy() kwargs['xrtol'] = 1e-3 res1 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(res1.xr - res1.xl, 1e-3 * np.abs(res1.x)) kwargs['xrtol'] = 1e-6 res2 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(res2.xr - res2.xl, 1e-6 * np.abs(res2.x)) assert_array_less(res2.xr - res2.xl, res1.xr - res1.xl) kwargs = kwargs0.copy() kwargs['fatol'] = 1e-3 res1 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(np.abs(res1.fun), 1e-3) kwargs['fatol'] = 1e-6 res2 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(np.abs(res2.fun), 1e-6) assert_array_less(np.abs(res2.fun), np.abs(res1.fun)) kwargs = kwargs0.copy() kwargs['frtol'] = 1e-3 x1, x2 = bracket f0 = np.minimum(abs(self.f(x1, *args)), abs(self.f(x2, *args))) res1 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(np.abs(res1.fun), 1e-3*f0) kwargs['frtol'] = 1e-6 res2 = zeros._chandrupatla(self.f, *bracket, **kwargs) assert_array_less(np.abs(res2.fun), 1e-6*f0) assert_array_less(np.abs(res2.fun), np.abs(res1.fun)) def test_maxiter_callback(self): # Test behavior of `maxiter` parameter and `callback` interface p = 0.612814 bracket = (-5, 5) maxiter = 5 def f(q, p): res = stats.norm().cdf(q) - p f.x = q f.fun = res return res f.x = None f.fun = None res = zeros._chandrupatla(f, *bracket, args=(p,), maxiter=maxiter) assert not np.any(res.success) assert np.all(res.nfev == maxiter+2) assert np.all(res.nit == maxiter) def callback(res): callback.iter += 1 callback.res = res assert hasattr(res, 'x') if callback.iter == 0: # callback is called once with initial bracket assert res.xl, res.xr == bracket else: # Ensure that attributes are updating each iteration assert f.x[0] in {res.xl, res.xr} assert f.fun[0] in {res.fl, res.fr} assert res.status == zeros._EINPROGRESS if callback.iter == maxiter: raise StopIteration callback.iter = -1 # callback called once before first iteration callback.res = None res2 = zeros._chandrupatla(f, *bracket, args=(p,), callback=callback) # terminating with callback is identical to terminating due to maxiter # (except for `status`) for key in res.keys(): if key == 'status': assert res[key] == zeros._ECONVERR assert callback.res[key] == zeros._EINPROGRESS assert res2[key] == zeros._ECALLBACK else: assert 
res2[key] == callback.res[key] == res[key] @pytest.mark.parametrize('case', optimize._tstutils._CHANDRUPATLA_TESTS) def test_nit_expected(self, case): # Test that `_chandrupatla` implements Chandrupatla's algorithm: # in all 40 test cases, the number of iterations performed # matches the number reported in the original paper. f, bracket, root, nfeval, id = case # Chandrupatla's criterion is equivalent to # abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard # abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x # that used by Chandrupatla in tests. res = zeros._chandrupatla(f, *bracket, xrtol=4e-10, xatol=1e-5) assert_allclose(res.fun, f(root), rtol=1e-8, atol=2e-3) assert_equal(res.nfev, nfeval) @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64)) def test_dtype(self, dtype): # Test that dtypes are preserved root = 0.622 def f(x): return ((x - root) ** 3).astype(dtype) res = zeros._chandrupatla(f, dtype(-3), dtype(5), xatol=1e-3) assert res.x.dtype == dtype assert_allclose(res.x, root, atol=1e-3) def test_input_validation(self): # Test input validation for appropriate error messages message = '`func` must be callable.' with pytest.raises(ValueError, match=message): zeros._chandrupatla(None, -4, 4) message = 'Abscissae and function output must be real numbers.' with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4+1j, 4) message = "shape mismatch: objects cannot be broadcast" # raised by `np.broadcast, but the traceback is readable IMO with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, [-2, -3], [3, 4, 5]) with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5]) message = 'Tolerances must be non-negative scalars.' with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, xatol=-1) with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, xrtol=None) with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, fatol='ekki') with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, frtol=None) message = '`maxiter` must be a non-negative integer.' with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, maxiter=1.5) with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, maxiter=-1) message = '`callback` must be callable.' with pytest.raises(ValueError, match=message): zeros._chandrupatla(lambda x: x, -4, 4, callback='shrubbery') def test_special_cases(self): # Test edge cases and other special cases # Test that integers are not passed to `f` # (otherwise this would overflow) def f(x): # assert np.issubdtype(x.dtype, np.floating) return x ** 99 - 1 res = zeros._chandrupatla(f, -7, 5) assert res.success assert_allclose(res.x, 1) # Test that if both ends of bracket equal root, algorithm reports # convergence def f(x): return x**2 - 1 res = zeros._chandrupatla(f, 1, 1) assert res.success assert_equal(res.x, 1) def f(x): return 1/x with np.errstate(invalid='ignore'): res = zeros._chandrupatla(f, np.inf, np.inf) assert res.success assert_equal(res.x, np.inf) # Test maxiter = 0. Should do nothing to bracket. 
def f(x): return x**3 - 1 bracket = (-3, 5) res = zeros._chandrupatla(f, *bracket, maxiter=0) assert res.xl, res.xr == bracket assert res.nit == 0 assert res.nfev == 2 assert res.status == -2 assert res.x == -3 # best so far # Test maxiter = 1 res = zeros._chandrupatla(f, *bracket, maxiter=1) assert res.success assert res.status == 0 assert res.nit == 1 assert res.nfev == 3 assert_allclose(res.x, 1) # Test scalar `args` (not in tuple) def f(x, c): return c*x - 1 res = zeros._chandrupatla(f, -1, 1, args=3) assert_allclose(res.x, 1/3) # # TODO: Test zero tolerance # # ~~What's going on here - why are iterations repeated?~~ # # tl goes to zero when xatol=xrtol=0. When function is nearly linear, # # this causes convergence issues. # def f(x): # return np.cos(x) # # res = zeros._chandrupatla(f, 0, np.pi, xatol=0, xrtol=0) # assert res.nit < 100 # xp = np.nextafter(res.x, np.inf) # xm = np.nextafter(res.x, -np.inf) # assert np.abs(res.fun) < np.abs(f(xp)) # assert np.abs(res.fun) < np.abs(f(xm)) class TestNewton(TestScalarRootFinders): def test_newton_collections(self): known_fail = ['aps.13.00'] known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27 for collection in ['aps', 'complex']: self.run_collection(collection, zeros.newton, 'newton', smoothness=2, known_fail=known_fail) def test_halley_collections(self): known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09', 'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13', 'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17', 'aps.12.18', 'aps.13.00'] for collection in ['aps', 'complex']: self.run_collection(collection, zeros.newton, 'halley', smoothness=2, known_fail=known_fail) def test_newton(self): for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: x = zeros.newton(f, 3, tol=1e-6) assert_allclose(f(x), 0, atol=1e-6) x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1 assert_allclose(f(x), 0, atol=1e-6) x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton assert_allclose(f(x), 0, atol=1e-6) x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley assert_allclose(f(x), 0, atol=1e-6) def test_newton_by_name(self): r"""Invoke newton through root_scalar()""" for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6) assert_allclose(f(r.root), 0, atol=1e-6) for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: r = root_scalar(f, method='newton', x0=3, xtol=1e-6) # without f' assert_allclose(f(r.root), 0, atol=1e-6) def test_secant_by_name(self): r"""Invoke secant through root_scalar()""" for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6) assert_allclose(f(r.root), 0, atol=1e-6) r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6) assert_allclose(f(r.root), 0, atol=1e-6) for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: r = root_scalar(f, method='secant', x0=3, xtol=1e-6) # without x1 assert_allclose(f(r.root), 0, atol=1e-6) def test_halley_by_name(self): r"""Invoke halley through root_scalar()""" for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]: r = root_scalar(f, method='halley', x0=3, fprime=f_1, fprime2=f_2, xtol=1e-6) assert_allclose(f(r.root), 0, atol=1e-6) def test_root_scalar_fail(self): message = 'fprime2 must be specified for halley' with pytest.raises(ValueError, match=message): root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2 message = 'fprime must be specified for halley' with pytest.raises(ValueError, match=message): 
root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime def test_array_newton(self): """test newton with array""" def f1(x, *a): b = a[0] + x * a[3] return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x def f1_1(x, *a): b = a[3] / a[5] return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1 def f1_2(x, *a): b = a[3] / a[5] return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2 a0 = np.array([ 5.32725221, 5.48673747, 5.49539973, 5.36387202, 4.80237316, 1.43764452, 5.23063958, 5.46094772, 5.50512718, 5.42046290 ]) a1 = (np.sin(range(10)) + 1.0) * 7.0 args = (a0, a1, 1e-09, 0.004, 10, 0.27456) x0 = [7.0] * 10 x = zeros.newton(f1, x0, f1_1, args) x_expected = ( 6.17264965, 11.7702805, 12.2219954, 7.11017681, 1.18151293, 0.143707955, 4.31928228, 10.5419107, 12.7552490, 8.91225749 ) assert_allclose(x, x_expected) # test halley's x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2) assert_allclose(x, x_expected) # test secant x = zeros.newton(f1, x0, args=args) assert_allclose(x, x_expected) def test_array_newton_complex(self): def f(x): return x + 1+1j def fprime(x): return 1.0 t = np.full(4, 1j) x = zeros.newton(f, t, fprime=fprime) assert_allclose(f(x), 0.) # should work even if x0 is not complex t = np.ones(4) x = zeros.newton(f, t, fprime=fprime) assert_allclose(f(x), 0.) x = zeros.newton(f, t) assert_allclose(f(x), 0.) def test_array_secant_active_zero_der(self): """test secant doesn't continue to iterate zero derivatives""" x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5], args=[np.array([17, 25])]) assert_allclose(x, (4.123105625617661, 5.0)) def test_array_newton_integers(self): # test secant with float x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2, args=([15.0, 17.0],)) assert_allclose(x, (3.872983346207417, 4.123105625617661)) # test integer becomes float x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],)) assert_allclose(x, (3.872983346207417, 4.123105625617661)) def test_array_newton_zero_der_failures(self): # test derivative zero warning assert_warns(RuntimeWarning, zeros.newton, lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y) # test failures and zero_der with pytest.warns(RuntimeWarning): results = zeros.newton(lambda y: y**2 - 2, [0., 0.], lambda y: 2*y, full_output=True) assert_allclose(results.root, 0) assert results.zero_der.all() assert not results.converged.any() def test_newton_combined(self): def f1(x): return x ** 2 - 2 * x - 1 def f1_1(x): return 2 * x - 2 def f1_2(x): return 2.0 + 0 * x def f1_and_p_and_pp(x): return x**2 - 2*x-1, 2*x-2, 2.0 sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1) sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True) assert_allclose(sol0.root, sol.root, atol=1e-8) assert_equal(2*sol.function_calls, sol0.function_calls) sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2) sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True) assert_allclose(sol0.root, sol.root, atol=1e-8) assert_equal(3*sol.function_calls, sol0.function_calls) def test_newton_full_output(self): # Test the full_output capability, both when converging and not. 
# Use simple polynomials, to avoid hitting platform dependencies # (e.g., exp & trig) in number of iterations x0 = 3 expected_counts = [(6, 7), (5, 10), (3, 9)] for derivs in range(3): kwargs = {'tol': 1e-6, 'full_output': True, } for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]: kwargs[k] = v x, r = zeros.newton(f1, x0, disp=False, **kwargs) assert_(r.converged) assert_equal(x, r.root) assert_equal((r.iterations, r.function_calls), expected_counts[derivs]) if derivs == 0: assert r.function_calls <= r.iterations + 1 else: assert_equal(r.function_calls, (derivs + 1) * r.iterations) # Now repeat, allowing one fewer iteration to force convergence failure iters = r.iterations - 1 x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs) assert_(not r.converged) assert_equal(x, r.root) assert_equal(r.iterations, iters) if derivs == 1: # Check that the correct Exception is raised and # validate the start of the message. with pytest.raises( RuntimeError, match='Failed to converge after %d iterations, value is .*' % (iters)): x, r = zeros.newton(f1, x0, maxiter=iters, disp=True, **kwargs) def test_deriv_zero_warning(self): def func(x): return x ** 2 - 2.0 def dfunc(x): return 2 * x assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False) with pytest.raises(RuntimeError, match='Derivative was zero'): zeros.newton(func, 0.0, dfunc) def test_newton_does_not_modify_x0(self): # https://github.com/scipy/scipy/issues/9964 x0 = np.array([0.1, 3]) x0_copy = x0.copy() # Copy to test for equality. newton(np.sin, x0, np.cos) assert_array_equal(x0, x0_copy) def test_gh17570_defaults(self): # Previously, when fprime was not specified, root_scalar would default # to secant. When x1 was not specified, secant failed. # Check that without fprime, the default is secant if x1 is specified # and newton otherwise. res_newton_default = root_scalar(f1, method='newton', x0=3, xtol=1e-6) res_secant_default = root_scalar(f1, method='secant', x0=3, x1=2, xtol=1e-6) # `newton` uses the secant method when `x1` and `x2` are specified res_secant = newton(f1, x0=3, x1=2, tol=1e-6, full_output=True)[1] # all three found a root assert_allclose(f1(res_newton_default.root), 0, atol=1e-6) assert res_newton_default.root.shape == tuple() assert_allclose(f1(res_secant_default.root), 0, atol=1e-6) assert res_secant_default.root.shape == tuple() assert_allclose(f1(res_secant.root), 0, atol=1e-6) assert res_secant.root.shape == tuple() # Defaults are correct assert (res_secant_default.root == res_secant.root != res_newton_default.iterations) assert (res_secant_default.iterations == res_secant_default.function_calls - 1 # true for secant == res_secant.iterations != res_newton_default.iterations == res_newton_default.function_calls/2) # newton 2-point diff def test_gh_5555(): root = 0.1 def f(x): return x - root methods = [zeros.bisect, zeros.ridder] xtol = rtol = TOL for method in methods: res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol) assert_allclose(root, res, atol=xtol, rtol=rtol, err_msg='method %s' % method.__name__) def test_gh_5557(): # Show that without the changes in 5557 brentq and brenth might # only achieve a tolerance of 2*(xtol + rtol*|res|). # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1, # 0.4). The important parts are that |f(0)| < |f(1)| (so that # brent takes 0 as the initial guess), |f(0)| < atol (so that # brent accepts 0 as the root), and that the exact root of f lies # more than atol away from 0 (so that brent doesn't achieve the # desired tolerance). 
def f(x): if x < 0.5: return -0.1 else: return x - 0.6 atol = 0.51 rtol = 4 * _FLOAT_EPS methods = [zeros.brentq, zeros.brenth] for method in methods: res = method(f, 0, 1, xtol=atol, rtol=rtol) assert_allclose(0.6, res, atol=atol, rtol=rtol) def test_brent_underflow_in_root_bracketing(): # Tetsing if an interval [a,b] brackets a zero of a function # by checking f(a)*f(b) < 0 is not reliable when the product # underflows/overflows. (reported in issue# 13737) underflow_scenario = (-450.0, -350.0, -400.0) overflow_scenario = (350.0, 450.0, 400.0) for a, b, root in [underflow_scenario, overflow_scenario]: c = np.exp(root) for method in [zeros.brenth, zeros.brentq]: res = method(lambda x: np.exp(x)-c, a, b) assert_allclose(root, res) class TestRootResults: r = zeros.RootResults(root=1.0, iterations=44, function_calls=46, flag=0) def test_repr(self): expected_repr = (" converged: True\n flag: converged" "\n function_calls: 46\n iterations: 44\n" " root: 1.0") assert_equal(repr(self.r), expected_repr) def test_type(self): assert isinstance(self.r, OptimizeResult) def test_complex_halley(): """Test Halley's works with complex roots""" def f(x, *a): return a[0] * x**2 + a[1] * x + a[2] def f_1(x, *a): return 2 * a[0] * x + a[1] def f_2(x, *a): retval = 2 * a[0] try: size = len(x) except TypeError: return retval else: return [retval] * size z = complex(1.0, 2.0) coeffs = (2.0, 3.0, 4.0) y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) # (-0.75000000000000078+1.1989578808281789j) assert_allclose(f(y, *coeffs), 0, atol=1e-6) z = [z] * 10 coeffs = (2.0, 3.0, 4.0) y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6) assert_allclose(f(y, *coeffs), 0, atol=1e-6) def test_zero_der_nz_dp(): """Test secant method with a non-zero dp, but an infinite newton step""" # pick a symmetrical functions and choose a point on the side that with dx # makes a secant that is a flat line with zero slope, EG: f = (x - 100)**2, # which has a root at x = 100 and is symmetrical around the line x = 100 # we have to pick a really big number so that it is consistently true # now find a point on each side so that the secant has a zero slope dx = np.finfo(float).eps ** 0.33 # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100 # -> 200 = p0 * (2 + dx) + dx p0 = (200.0 - dx) / (2.0 + dx) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "RMS of") x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10) assert_allclose(x, [100] * 10) # test scalar cases too p0 = (2.0 - 1e-4) / (2.0 + 1e-4) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Tolerance of") x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False) assert_allclose(x, 1) with pytest.raises(RuntimeError, match='Tolerance of'): x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True) p0 = (-2.0 + 1e-4) / (2.0 + 1e-4) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Tolerance of") x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False) assert_allclose(x, -1) with pytest.raises(RuntimeError, match='Tolerance of'): x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True) def test_array_newton_failures(): """Test that array newton fails as expected""" # p = 0.68 # [MPa] # dp = -0.068 * 1e6 # [Pa] # T = 323 # [K] diameter = 0.10 # [m] # L = 100 # [m] roughness = 0.00015 # [m] rho = 988.1 # [kg/m**3] mu = 5.4790e-04 # [Pa*s] u = 2.488 # [m/s] reynolds_number = rho * u * diameter / mu # Reynolds number def colebrook_eqn(darcy_friction, re, dia): return (1 / np.sqrt(darcy_friction) + 2 * 
np.log10(roughness / 3.7 / dia + 2.51 / re / np.sqrt(darcy_friction))) # only some failures with pytest.warns(RuntimeWarning): result = zeros.newton( colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2, args=[reynolds_number, diameter], full_output=True ) assert not result.converged.all() # they all fail with pytest.raises(RuntimeError): result = zeros.newton( colebrook_eqn, x0=[0.01] * 2, maxiter=2, args=[reynolds_number, diameter], full_output=True ) # this test should **not** raise a RuntimeWarning def test_gh8904_zeroder_at_root_fails(): """Test that Newton or Halley don't warn if zero derivative at root""" # a function that has a zero derivative at it's root def f_zeroder_root(x): return x**3 - x**2 # should work with secant r = zeros.newton(f_zeroder_root, x0=0) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) # test again with array r = zeros.newton(f_zeroder_root, x0=[0]*10) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) # 1st derivative def fder(x): return 3 * x**2 - 2 * x # 2nd derivative def fder2(x): return 6*x - 2 # should work with newton and halley r = zeros.newton(f_zeroder_root, x0=0, fprime=fder) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) r = zeros.newton(f_zeroder_root, x0=0, fprime=fder, fprime2=fder2) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) # test again with array r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder, fprime2=fder2) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) # also test that if a root is found we do not raise RuntimeWarning even if # the derivative is zero, EG: at x = 0.5, then fval = -0.125 and # fder = -0.25 so the next guess is 0.5 - (-0.125/-0.5) = 0 which is the # root, but if the solver continued with that guess, then it will calculate # a zero derivative, so it should return the root w/o RuntimeWarning r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) # test again with array r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder) assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol) # doesn't apply to halley def test_gh_8881(): r"""Test that Halley's method realizes that the 2nd order adjustment is too big and drops off to the 1st order adjustment.""" n = 9 def f(x): return power(x, 1.0/n) - power(n, 1.0/n) def fp(x): return power(x, (1.0-n)/n)/n def fpp(x): return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n x0 = 0.1 # The root is at x=9. # The function has positive slope, x0 < root. # Newton succeeds in 8 iterations rt, r = newton(f, x0, fprime=fp, full_output=True) assert r.converged # Before the Issue 8881/PR 8882, halley would send x in the wrong direction. # Check that it now succeeds. 
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) assert r.converged def test_gh_9608_preserve_array_shape(): """ Test that shape is preserved for array inputs even if fprime or fprime2 is scalar """ def f(x): return x**2 def fp(x): return 2 * x def fpp(x): return 2 x0 = np.array([-2], dtype=np.float32) rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True) assert r.converged x0_array = np.array([-2, -3], dtype=np.float32) # This next invocation should fail with pytest.raises(IndexError): result = zeros.newton( f, x0_array, fprime=fp, fprime2=fpp, full_output=True ) def fpp_array(x): return np.full(np.shape(x), 2, dtype=np.float32) result = zeros.newton( f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True ) assert result.converged.all() @pytest.mark.parametrize( "maximum_iterations,flag_expected", [(10, zeros.CONVERR), (100, zeros.CONVERGED)]) def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected): """ Test that if the maximum iterations is exceeded that the flag is not converged. """ result = zeros.brentq( lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5, -30, 30, (), 1e-6, 1e-6, maximum_iterations, full_output=True, disp=False) assert result[1].flag == flag_expected if flag_expected == zeros.CONVERR: # didn't converge because exceeded maximum iterations assert result[1].iterations == maximum_iterations elif flag_expected == zeros.CONVERGED: # converged before maximum iterations assert result[1].iterations < maximum_iterations def test_gh9551_raise_error_if_disp_true(): """Test that if disp is true then zero derivative raises RuntimeError""" def f(x): return x*x + 1 def f_p(x): return 2*x assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False) with pytest.raises( RuntimeError, match=r'^Derivative was zero\. Failed to converge after \d+ iterations, value is [+-]?\d*\.\d+\.$'): zeros.newton(f, 1.0, f_p) root = zeros.newton(f, complex(10.0, 10.0), f_p) assert_allclose(root, complex(0.0, 1.0)) @pytest.mark.parametrize('solver_name', ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) def test_gh3089_8394(solver_name): # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect # results when they encountered NaNs. Check that this is resolved. def f(x): return np.nan solver = getattr(zeros, solver_name) with pytest.raises(ValueError, match="The function value at x..."): solver(f, 0, 1) @pytest.mark.parametrize('method', ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) def test_gh18171(method): # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect # results when they encountered NaNs. Check that `root_scalar` returns # normally but indicates that convergence was unsuccessful. See gh-18171. def f(x): f._count += 1 return np.nan f._count = 0 res = root_scalar(f, bracket=(0, 1), method=method) assert res.converged is False assert res.flag.startswith("The function value at x") assert res.function_calls == f._count assert str(res.root) in res.flag @pytest.mark.parametrize('solver_name', ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) @pytest.mark.parametrize('rs_interface', [True, False]) def test_function_calls(solver_name, rs_interface): # There do not appear to be checks that the bracketing solvers report the # correct number of function evaluations. Check that this is the case. 
solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) if rs_interface else getattr(zeros, solver_name)) def f(x): f.calls += 1 return x**2 - 1 f.calls = 0 res = solver(f, 0, 10, full_output=True) if rs_interface: assert res.function_calls == f.calls else: assert res[1].function_calls == f.calls def test_gh_14486_converged_false(): """Test that zero slope with secant method results in a converged=False""" def lhs(x): return x * np.exp(-x*x) - 0.07 with pytest.warns(RuntimeWarning, match='Tolerance of'): res = root_scalar(lhs, method='secant', x0=-0.15, x1=1.0) assert not res.converged assert res.flag == 'convergence error' with pytest.warns(RuntimeWarning, match='Tolerance of'): res = newton(lhs, x0=-0.15, x1=1.0, disp=False, full_output=True)[1] assert not res.converged assert res.flag == 'convergence error' @pytest.mark.parametrize('solver_name', ['brentq', 'brenth', 'bisect', 'ridder', 'toms748']) @pytest.mark.parametrize('rs_interface', [True, False]) def test_gh5584(solver_name, rs_interface): # gh-5584 reported that an underflow can cause sign checks in the algorithm # to fail. Check that this is resolved. solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b))) if rs_interface else getattr(zeros, solver_name)) def f(x): return 1e-200*x # Report failure when signs are the same with pytest.raises(ValueError, match='...must have different signs'): solver(f, -0.5, -0.4, full_output=True) # Solve successfully when signs are different res = solver(f, -0.5, 0.4, full_output=True) res = res if rs_interface else res[1] assert res.converged assert_allclose(res.root, 0, atol=1e-8) # Solve successfully when one side is negative zero res = solver(f, -0.5, float('-0.0'), full_output=True) res = res if rs_interface else res[1] assert res.converged assert_allclose(res.root, 0, atol=1e-8) def test_gh13407(): # gh-13407 reported that the message produced by `scipy.optimize.toms748` # when `rtol < eps` is incorrect, and also that toms748 is unusual in # accepting `rtol` as low as eps while other solvers raise at 4*eps. Check # that the error message has been corrected and that `rtol=eps` can produce # a lower function value than `rtol=4*eps`. def f(x): return x**3 - 2*x - 5 xtol = 1e-300 eps = np.finfo(float).eps x1 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=1*eps) f1 = f(x1) x4 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=4*eps) f4 = f(x4) assert f1 < f4 # using old-style syntax to get exactly the same message message = fr"rtol too small \({eps/2:g} < {eps:g}\)" with pytest.raises(ValueError, match=message): zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=eps/2) def test_newton_complex_gh10103(): # gh-10103 reported a problem when `newton` is pass a Python complex x0, # no `fprime` (secant method), and no `x1` (`x1` must be constructed). # Check that this is resolved. def f(z): return z - 1 res = newton(f, 1+1j) assert_allclose(res, 1, atol=1e-12) res = root_scalar(f, x0=1+1j, x1=2+1.5j, method='secant') assert_allclose(res.root, 1, atol=1e-12) @pytest.mark.parametrize('method', all_methods) def test_maxiter_int_check_gh10236(method): # gh-10236 reported that the error message when `maxiter` is not an integer # was difficult to interpret. Check that this was resolved (by gh-10907). message = "'float' object cannot be interpreted as an integer" with pytest.raises(TypeError, match=message): method(f1, 0.0, 1.0, maxiter=72.45)
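# --- Editorial sketch (not part of the scipy test suite) --------------------
# Several of the regression tests above (gh-5557, gh-13407, gh-5584) lean on
# the documented guarantee that a bracketing solver returns an abscissa
# within ``xtol + rtol*abs(root)`` of the true root.  The following minimal,
# self-contained example illustrates that guarantee with the public
# `scipy.optimize.brentq` API; the function and tolerances are hypothetical
# values chosen only for illustration.
import numpy as np
from scipy.optimize import brentq

def _demo_cubic(x):
    return x**3 - 2*x - 5          # single real root near 2.0946

_xtol, _rtol = 1e-12, 4 * np.finfo(float).eps
_root, _info = brentq(_demo_cubic, 2, 3, xtol=_xtol, rtol=_rtol,
                      full_output=True)
assert _info.converged
# Compare against a more tightly converged reference value; the difference
# stays within the advertised tolerance.
_reference = brentq(_demo_cubic, 2, 3, xtol=1e-15)
assert abs(_root - _reference) <= _xtol + _rtol * abs(_reference)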
47,558
37.077662
112
py
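# --- Editorial sketch (not part of the scipy test suite) --------------------
# The TestNewton cases in the preceding test_zeros.py entry exercise the way
# `scipy.optimize.newton` selects its method from the derivatives supplied:
# secant when only x0 (and optionally x1) is given, Newton-Raphson when
# `fprime` is given, and Halley when both `fprime` and `fprime2` are given.
# A minimal sketch of that dispatch using only the public API; the quadratic
# below is a hypothetical example, not taken from the tests.
from scipy.optimize import newton

def _g(x):
    return x**2 - 2

def _gp(x):
    return 2*x

def _gpp(x):
    return 2.0

_, _r_secant = newton(_g, x0=1.0, x1=2.0, full_output=True)            # secant
_, _r_newton = newton(_g, x0=1.0, fprime=_gp, full_output=True)        # Newton
_, _r_halley = newton(_g, x0=1.0, fprime=_gp, fprime2=_gpp,
                      full_output=True)                                # Halley
# All three variants converge to sqrt(2) for this smooth, well-behaved problem.
assert _r_secant.converged and _r_newton.converged and _r_halley.converged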
scipy
scipy-main/scipy/optimize/tests/test_lsq_linear.py
import pytest import numpy as np from numpy.linalg import lstsq from numpy.testing import assert_allclose, assert_equal, assert_ from scipy.sparse import rand, coo_matrix from scipy.sparse.linalg import aslinearoperator from scipy.optimize import lsq_linear from scipy.optimize._minimize import Bounds A = np.array([ [0.171, -0.057], [-0.049, -0.248], [-0.166, 0.054], ]) b = np.array([0.074, 1.014, -0.383]) class BaseMixin: def setup_method(self): self.rnd = np.random.RandomState(0) def test_dense_no_bounds(self): for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) assert_allclose(res.x, res.unbounded_sol[0]) def test_dense_bounds(self): # Solutions for comparison are taken from MATLAB. lb = np.array([-1, -10]) ub = np.array([1, 0]) unbounded_sol = lstsq(A, b, rcond=-1)[0] for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, lstsq(A, b, rcond=-1)[0]) assert_allclose(res.unbounded_sol[0], unbounded_sol) lb = np.array([0.0, -np.inf]) for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (lb, np.inf), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, np.array([0.0, -4.084174437334673]), atol=1e-6) assert_allclose(res.unbounded_sol[0], unbounded_sol) lb = np.array([-1, 0]) for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (lb, np.inf), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, np.array([0.448427311733504, 0]), atol=1e-15) assert_allclose(res.unbounded_sol[0], unbounded_sol) ub = np.array([np.inf, -5]) for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (-np.inf, ub), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, np.array([-0.105560998682388, -5])) assert_allclose(res.unbounded_sol[0], unbounded_sol) ub = np.array([-1, np.inf]) for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (-np.inf, ub), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, np.array([-1, -4.181102129483254])) assert_allclose(res.unbounded_sol[0], unbounded_sol) lb = np.array([0, -4]) ub = np.array([1, 0]) for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, np.array([0.005236663400791, -4])) assert_allclose(res.unbounded_sol[0], unbounded_sol) def test_bounds_variants(self): x = np.array([1, 3]) A = self.rnd.uniform(size=(2, 2)) b = A@x lb = np.array([1, 1]) ub = np.array([2, 2]) bounds_old = (lb, ub) bounds_new = Bounds(lb, ub) res_old = lsq_linear(A, b, bounds_old) res_new = lsq_linear(A, b, bounds_new) assert not np.allclose(res_new.x, res_new.unbounded_sol[0]) assert_allclose(res_old.x, res_new.x) def test_np_matrix(self): # gh-10711 with np.testing.suppress_warnings() as sup: sup.filter(PendingDeprecationWarning) A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]]) k = np.array([20, 15]) lsq_linear(A, k) def test_dense_rank_deficient(self): A = np.array([[-0.307, -0.184]]) b = np.array([0.773]) lb = [-0.1, -0.1] ub = [0.1, 0.1] for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (lb, ub), method=self.method, lsq_solver=lsq_solver) assert_allclose(res.x, [-0.1, -0.1]) assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) A = np.array([ [0.334, 0.668], [-0.516, -1.032], [0.192, 0.384], ]) b = np.array([-1.436, 0.135, 0.909]) lb = [0, -1] ub = [1, -0.5] for lsq_solver in self.lsq_solvers: res = lsq_linear(A, b, (lb, ub), method=self.method, 
lsq_solver=lsq_solver) assert_allclose(res.optimality, 0, atol=1e-11) assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) def test_full_result(self): lb = np.array([0, -4]) ub = np.array([1, 0]) res = lsq_linear(A, b, (lb, ub), method=self.method) assert_allclose(res.x, [0.005236663400791, -4]) assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0]) r = A.dot(res.x) - b assert_allclose(res.cost, 0.5 * np.dot(r, r)) assert_allclose(res.fun, r) assert_allclose(res.optimality, 0.0, atol=1e-12) assert_equal(res.active_mask, [0, -1]) assert_(res.nit < 15) assert_(res.status == 1 or res.status == 3) assert_(isinstance(res.message, str)) assert_(res.success) # This is a test for issue #9982. def test_almost_singular(self): A = np.array( [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789], [0.3742460132129041, 0.0130523214078376, 0.0130523214077873], [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]]) b = np.array( [0.0055029366538097, 0.0026677442422208, 0.0066612514782381]) result = lsq_linear(A, b, method=self.method) assert_(result.cost < 1.1e-8) @pytest.mark.xslow def test_large_rank_deficient(self): np.random.seed(0) n, m = np.sort(np.random.randint(2, 1000, size=2)) m *= 2 # make m >> n A = 1.0 * np.random.randint(-99, 99, size=[m, n]) b = 1.0 * np.random.randint(-99, 99, size=[m]) bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0) bounds[1, :] += 1.0 # ensure up > lb # Make the A matrix strongly rank deficient by replicating some columns w = np.random.choice(n, n) # Select random columns with duplicates A = A[:, w] x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x cost_bvls = np.sum((A @ x_bvls - b)**2) cost_trf = np.sum((A @ x_trf - b)**2) assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) def test_convergence_small_matrix(self): A = np.array([[49.0, 41.0, -32.0], [-19.0, -32.0, -8.0], [-13.0, 10.0, 69.0]]) b = np.array([-41.0, -90.0, 47.0]) bounds = np.array([[31.0, -44.0, 26.0], [54.0, -32.0, 28.0]]) x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x cost_bvls = np.sum((A @ x_bvls - b)**2) cost_trf = np.sum((A @ x_trf - b)**2) assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10) class SparseMixin: def test_sparse_and_LinearOperator(self): m = 5000 n = 1000 A = rand(m, n, random_state=0) b = self.rnd.randn(m) res = lsq_linear(A, b) assert_allclose(res.optimality, 0, atol=1e-6) A = aslinearoperator(A) res = lsq_linear(A, b) assert_allclose(res.optimality, 0, atol=1e-6) def test_sparse_bounds(self): m = 5000 n = 1000 A = rand(m, n, random_state=0) b = self.rnd.randn(m) lb = self.rnd.randn(n) ub = lb + 1 res = lsq_linear(A, b, (lb, ub)) assert_allclose(res.optimality, 0.0, atol=1e-6) res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13, lsmr_maxiter=1500) assert_allclose(res.optimality, 0.0, atol=1e-6) res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto') assert_allclose(res.optimality, 0.0, atol=1e-6) def test_sparse_ill_conditioned(self): # Sparse matrix with condition number of ~4 million data = np.array([1., 1., 1., 1. 
+ 1e-6, 1.]) row = np.array([0, 0, 1, 2, 2]) col = np.array([0, 2, 1, 0, 2]) A = coo_matrix((data, (row, col)), shape=(3, 3)) # Get the exact solution exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact') # Default lsmr arguments should not fully converge the solution default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr') with pytest.raises(AssertionError, match=""): assert_allclose(exact_sol.x, default_lsmr_sol.x) # By increasing the maximum lsmr iters, it will converge conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10) assert_allclose(exact_sol.x, conv_lsmr.x) class TestTRF(BaseMixin, SparseMixin): method = 'trf' lsq_solvers = ['exact', 'lsmr'] class TestBVLS(BaseMixin): method = 'bvls' lsq_solvers = ['exact'] class TestErrorChecking: def test_option_lsmr_tol(self): # Should work with a positive float, string equal to 'auto', or None _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2) _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto') _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None) # Should raise error with negative float, strings # other than 'auto', and integers err_message = "`lsmr_tol` must be None, 'auto', or positive float." with pytest.raises(ValueError, match=err_message): _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1) with pytest.raises(ValueError, match=err_message): _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo') with pytest.raises(ValueError, match=err_message): _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1) def test_option_lsmr_maxiter(self): # Should work with positive integers or None _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1) _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None) # Should raise error with 0 or negative max iter err_message = "`lsmr_maxiter` must be None or positive integer." with pytest.raises(ValueError, match=err_message): _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0) with pytest.raises(ValueError, match=err_message): _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1)
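# --- Editorial sketch (not part of the scipy test suite) --------------------
# The bound-constrained cases above compare `lsq_linear` against the
# unconstrained `numpy.linalg.lstsq` solution.  A minimal, self-contained
# version of that comparison with hypothetical random data and the public
# API only: the constrained objective can never beat the unconstrained one,
# and the returned solution respects the requested bounds.
import numpy as np
from numpy.linalg import lstsq
from scipy.optimize import lsq_linear

_rng = np.random.default_rng(0)
_A_demo = _rng.standard_normal((20, 3))
_b_demo = _rng.standard_normal(20)

_unbounded = lstsq(_A_demo, _b_demo, rcond=None)[0]
_res = lsq_linear(_A_demo, _b_demo, bounds=(0, np.inf), method='bvls')

assert np.all(_res.x >= 0)                      # non-negativity is enforced
_unbounded_cost = 0.5 * np.sum((_A_demo @ _unbounded - _b_demo)**2)
assert _unbounded_cost <= _res.cost + 1e-12     # bounds can only raise the cost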
10,861
37.112281
79
py
scipy
scipy-main/scipy/optimize/tests/test_differentiable_functions.py
import pytest import numpy as np from numpy.testing import (TestCase, assert_array_almost_equal, assert_array_equal, assert_, assert_allclose, assert_equal) from scipy.sparse import csr_matrix from scipy.sparse.linalg import LinearOperator from scipy.optimize._differentiable_functions import (ScalarFunction, VectorFunction, LinearVectorFunction, IdentityVectorFunction) from scipy.optimize import rosen, rosen_der, rosen_hess from scipy.optimize._hessian_update_strategy import BFGS class ExScalarFunction: def __init__(self): self.nfev = 0 self.ngev = 0 self.nhev = 0 def fun(self, x): self.nfev += 1 return 2*(x[0]**2 + x[1]**2 - 1) - x[0] def grad(self, x): self.ngev += 1 return np.array([4*x[0]-1, 4*x[1]]) def hess(self, x): self.nhev += 1 return 4*np.eye(2) class TestScalarFunction(TestCase): def test_finite_difference_grad(self): ex = ExScalarFunction() nfev = 0 ngev = 0 x0 = [1.0, 0.0] analit = ScalarFunction(ex.fun, x0, (), ex.grad, ex.hess, None, (-np.inf, np.inf)) nfev += 1 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev, nfev) approx = ScalarFunction(ex.fun, x0, (), '2-point', ex.hess, None, (-np.inf, np.inf)) nfev += 3 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(analit.f, approx.f) assert_array_almost_equal(analit.g, approx.g) x = [10, 0.3] f_analit = analit.fun(x) g_analit = analit.grad(x) nfev += 1 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) f_approx = approx.fun(x) g_approx = approx.grad(x) nfev += 3 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_almost_equal(f_analit, f_approx) assert_array_almost_equal(g_analit, g_approx) x = [2.0, 1.0] g_analit = analit.grad(x) ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) g_approx = approx.grad(x) nfev += 3 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_almost_equal(g_analit, g_approx) x = [2.5, 0.3] f_analit = analit.fun(x) g_analit = analit.grad(x) nfev += 1 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) f_approx = approx.fun(x) g_approx = approx.grad(x) nfev += 3 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_almost_equal(f_analit, f_approx) assert_array_almost_equal(g_analit, g_approx) x = [2, 0.3] f_analit = analit.fun(x) g_analit = analit.grad(x) nfev += 1 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) f_approx = approx.fun(x) g_approx = approx.grad(x) nfev += 3 ngev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_almost_equal(f_analit, f_approx) assert_array_almost_equal(g_analit, g_approx) def test_fun_and_grad(self): ex = ExScalarFunction() def fg_allclose(x, y): assert_allclose(x[0], y[0]) 
assert_allclose(x[1], y[1]) # with analytic gradient x0 = [2.0, 0.3] analit = ScalarFunction(ex.fun, x0, (), ex.grad, ex.hess, None, (-np.inf, np.inf)) fg = ex.fun(x0), ex.grad(x0) fg_allclose(analit.fun_and_grad(x0), fg) assert analit.ngev == 1 x0[1] = 1. fg = ex.fun(x0), ex.grad(x0) fg_allclose(analit.fun_and_grad(x0), fg) # with finite difference gradient x0 = [2.0, 0.3] sf = ScalarFunction(ex.fun, x0, (), '3-point', ex.hess, None, (-np.inf, np.inf)) assert sf.ngev == 1 fg = ex.fun(x0), ex.grad(x0) fg_allclose(sf.fun_and_grad(x0), fg) assert sf.ngev == 1 x0[1] = 1. fg = ex.fun(x0), ex.grad(x0) fg_allclose(sf.fun_and_grad(x0), fg) def test_finite_difference_hess_linear_operator(self): ex = ExScalarFunction() nfev = 0 ngev = 0 nhev = 0 x0 = [1.0, 0.0] analit = ScalarFunction(ex.fun, x0, (), ex.grad, ex.hess, None, (-np.inf, np.inf)) nfev += 1 ngev += 1 nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev, nhev) approx = ScalarFunction(ex.fun, x0, (), ex.grad, '2-point', None, (-np.inf, np.inf)) assert_(isinstance(approx.H, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_equal(analit.f, approx.f) assert_array_almost_equal(analit.g, approx.g) assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v)) nfev += 1 ngev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [2.0, 1.0] H_analit = analit.hess(x) nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) H_approx = approx.hess(x) assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) ngev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [2.1, 1.2] H_analit = analit.hess(x) nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) H_approx = approx.hess(x) assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) ngev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [2.5, 0.3] _ = analit.grad(x) H_analit = analit.hess(x) ngev += 1 nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) _ = approx.grad(x) H_approx = approx.hess(x) 
assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) ngev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [5.2, 2.3] _ = analit.grad(x) H_analit = analit.hess(x) ngev += 1 nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) _ = approx.grad(x) H_approx = approx.hess(x) assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) ngev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.ngev, ngev) assert_array_equal(analit.ngev+approx.ngev, ngev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) def test_x_storage_overlap(self): # Scalar_Function should not store references to arrays, it should # store copies - this checks that updating an array in-place causes # Scalar_Function.x to be updated. def f(x): return np.sum(np.asarray(x) ** 2) x = np.array([1., 2., 3.]) sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)) assert x is not sf.x assert_equal(sf.fun(x), 14.0) assert x is not sf.x x[0] = 0. f1 = sf.fun(x) assert_equal(f1, 13.0) x[0] = 1 f2 = sf.fun(x) assert_equal(f2, 14.0) assert x is not sf.x # now test with a HessianUpdate strategy specified hess = BFGS() x = np.array([1., 2., 3.]) sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf)) assert x is not sf.x assert_equal(sf.fun(x), 14.0) assert x is not sf.x x[0] = 0. f1 = sf.fun(x) assert_equal(f1, 13.0) x[0] = 1 f2 = sf.fun(x) assert_equal(f2, 14.0) assert x is not sf.x # gh13740 x is changed in user function def ff(x): x *= x # overwrite x return np.sum(x) x = np.array([1., 2., 3.]) sf = ScalarFunction( ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf) ) assert x is not sf.x assert_equal(sf.fun(x), 14.0) assert_equal(sf.x, np.array([1., 2., 3.])) assert x is not sf.x def test_lowest_x(self): # ScalarFunction should remember the lowest func(x) visited. 
x0 = np.array([2, 3, 4]) sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess, None, None) sf.fun([1, 1, 1]) sf.fun(x0) sf.fun([1.01, 1, 1.0]) sf.grad([1.01, 1, 1.0]) assert_equal(sf._lowest_f, 0.0) assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess, None, (-np.inf, np.inf)) sf.fun([1, 1, 1]) sf.fun(x0) sf.fun([1.01, 1, 1.0]) sf.grad([1.01, 1, 1.0]) assert_equal(sf._lowest_f, 0.0) assert_equal(sf._lowest_x, [1.0, 1.0, 1.0]) class ExVectorialFunction: def __init__(self): self.nfev = 0 self.njev = 0 self.nhev = 0 def fun(self, x): self.nfev += 1 return np.array([2*(x[0]**2 + x[1]**2 - 1) - x[0], 4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]]) def jac(self, x): self.njev += 1 return np.array([[4*x[0]-1, 4*x[1]], [12*x[0]**2-3, 8*x[1]]]) def hess(self, x, v): self.nhev += 1 return v[0]*4*np.eye(2) + v[1]*np.array([[24*x[0], 0], [0, 8]]) class TestVectorialFunction(TestCase): def test_finite_difference_jac(self): ex = ExVectorialFunction() nfev = 0 njev = 0 x0 = [1.0, 0.0] analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, (-np.inf, np.inf), None) nfev += 1 njev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev, njev) approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None, (-np.inf, np.inf), None) nfev += 3 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(analit.f, approx.f) assert_array_almost_equal(analit.J, approx.J) x = [10, 0.3] f_analit = analit.fun(x) J_analit = analit.jac(x) nfev += 1 njev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) f_approx = approx.fun(x) J_approx = approx.jac(x) nfev += 3 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_almost_equal(f_analit, f_approx) assert_array_almost_equal(J_analit, J_approx, decimal=4) x = [2.0, 1.0] J_analit = analit.jac(x) njev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) J_approx = approx.jac(x) nfev += 3 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_almost_equal(J_analit, J_approx) x = [2.5, 0.3] f_analit = analit.fun(x) J_analit = analit.jac(x) nfev += 1 njev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) f_approx = approx.fun(x) J_approx = approx.jac(x) nfev += 3 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_almost_equal(f_analit, f_approx) assert_array_almost_equal(J_analit, J_approx) x = [2, 0.3] f_analit = analit.fun(x) J_analit = analit.jac(x) nfev += 1 njev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) f_approx = approx.fun(x) J_approx = 
approx.jac(x) nfev += 3 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_almost_equal(f_analit, f_approx) assert_array_almost_equal(J_analit, J_approx) def test_finite_difference_hess_linear_operator(self): ex = ExVectorialFunction() nfev = 0 njev = 0 nhev = 0 x0 = [1.0, 0.0] v0 = [1.0, 2.0] analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None, (-np.inf, np.inf), None) nfev += 1 njev += 1 nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev, nhev) approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None, (-np.inf, np.inf), None) assert_(isinstance(approx.H, LinearOperator)) for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_equal(analit.f, approx.f) assert_array_almost_equal(analit.J, approx.J) assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p)) nfev += 1 njev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [2.0, 1.0] H_analit = analit.hess(x, v0) nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) H_approx = approx.hess(x, v0) assert_(isinstance(H_approx, LinearOperator)) for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p), decimal=5) njev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [2.1, 1.2] v = [1.0, 1.0] H_analit = analit.hess(x, v) nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) H_approx = approx.hess(x, v) assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v)) njev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [2.5, 0.3] _ = analit.jac(x) H_analit = analit.hess(x, v0) njev += 1 nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) _ = approx.jac(x) H_approx = approx.hess(x, v0) assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) njev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) 
assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) x = [5.2, 2.3] v = [2.3, 5.2] _ = analit.jac(x) H_analit = analit.hess(x, v) njev += 1 nhev += 1 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) _ = approx.jac(x) H_approx = approx.hess(x, v) assert_(isinstance(H_approx, LinearOperator)) for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]): assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4) njev += 4 assert_array_equal(ex.nfev, nfev) assert_array_equal(analit.nfev+approx.nfev, nfev) assert_array_equal(ex.njev, njev) assert_array_equal(analit.njev+approx.njev, njev) assert_array_equal(ex.nhev, nhev) assert_array_equal(analit.nhev+approx.nhev, nhev) def test_x_storage_overlap(self): # VectorFunction should not store references to arrays, it should # store copies - this checks that updating an array in-place causes # Scalar_Function.x to be updated. ex = ExVectorialFunction() x0 = np.array([1.0, 0.0]) vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None, (-np.inf, np.inf), None) assert x0 is not vf.x assert_equal(vf.fun(x0), ex.fun(x0)) assert x0 is not vf.x x0[0] = 2. assert_equal(vf.fun(x0), ex.fun(x0)) assert x0 is not vf.x x0[0] = 1. assert_equal(vf.fun(x0), ex.fun(x0)) assert x0 is not vf.x # now test with a HessianUpdate strategy specified hess = BFGS() x0 = np.array([1.0, 0.0]) vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None, (-np.inf, np.inf), None) with pytest.warns(UserWarning): # filter UserWarning because ExVectorialFunction is linear and # a quasi-Newton approximation is used for the Hessian. assert x0 is not vf.x assert_equal(vf.fun(x0), ex.fun(x0)) assert x0 is not vf.x x0[0] = 2. assert_equal(vf.fun(x0), ex.fun(x0)) assert x0 is not vf.x x0[0] = 1. 
assert_equal(vf.fun(x0), ex.fun(x0)) assert x0 is not vf.x def test_LinearVectorFunction(): A_dense = np.array([ [-1, 2, 0], [0, 4, 2] ]) x0 = np.zeros(3) A_sparse = csr_matrix(A_dense) x = np.array([1, -1, 0]) v = np.array([-1, 1]) Ax = np.array([-3, -4]) f1 = LinearVectorFunction(A_dense, x0, None) assert_(not f1.sparse_jacobian) f2 = LinearVectorFunction(A_dense, x0, True) assert_(f2.sparse_jacobian) f3 = LinearVectorFunction(A_dense, x0, False) assert_(not f3.sparse_jacobian) f4 = LinearVectorFunction(A_sparse, x0, None) assert_(f4.sparse_jacobian) f5 = LinearVectorFunction(A_sparse, x0, True) assert_(f5.sparse_jacobian) f6 = LinearVectorFunction(A_sparse, x0, False) assert_(not f6.sparse_jacobian) assert_array_equal(f1.fun(x), Ax) assert_array_equal(f2.fun(x), Ax) assert_array_equal(f1.jac(x), A_dense) assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray()) assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3))) def test_LinearVectorFunction_memoization(): A = np.array([[-1, 2, 0], [0, 4, 2]]) x0 = np.array([1, 2, -1]) fun = LinearVectorFunction(A, x0, False) assert_array_equal(x0, fun.x) assert_array_equal(A.dot(x0), fun.f) x1 = np.array([-1, 3, 10]) assert_array_equal(A, fun.jac(x1)) assert_array_equal(x1, fun.x) assert_array_equal(A.dot(x0), fun.f) assert_array_equal(A.dot(x1), fun.fun(x1)) assert_array_equal(A.dot(x1), fun.f) def test_IdentityVectorFunction(): x0 = np.zeros(3) f1 = IdentityVectorFunction(x0, None) f2 = IdentityVectorFunction(x0, False) f3 = IdentityVectorFunction(x0, True) assert_(f1.sparse_jacobian) assert_(not f2.sparse_jacobian) assert_(f3.sparse_jacobian) x = np.array([-1, 2, 1]) v = np.array([-2, 3, 0]) assert_array_equal(f1.fun(x), x) assert_array_equal(f2.fun(x), x) assert_array_equal(f1.jac(x).toarray(), np.eye(3)) assert_array_equal(f2.jac(x), np.eye(3)) assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
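# --- Editorial sketch (not part of the scipy test suite) --------------------
# The counting tests above verify that a '2-point' ScalarFunction pays one
# extra function evaluation per variable (plus one at the new point) each
# time a finite-difference gradient is requested, whereas an analytic
# gradient costs a single gradient call.  A minimal sketch of that
# bookkeeping; ScalarFunction is a private helper, and the constructor
# arguments below simply mirror the calls already used in the tests.
import numpy as np
from scipy.optimize._differentiable_functions import ScalarFunction

def _quad(x):
    _quad.nfev += 1
    return np.sum(x**2)
_quad.nfev = 0

_x0_demo = np.array([1.0, 2.0, 3.0])
_sf = ScalarFunction(_quad, _x0_demo, (), '2-point',
                     lambda x: np.zeros((3, 3)),   # trivial analytic Hessian
                     None, (-np.inf, np.inf))
_before = _quad.nfev
_sf.grad(_x0_demo + 1)   # force a fresh finite-difference gradient
# One evaluation at the new x plus one shifted evaluation per variable,
# matching the nfev accounting asserted in TestScalarFunction above.
assert _quad.nfev - _before == 1 + _x0_demo.size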
26,154
34.730874
86
py
scipy
scipy-main/scipy/optimize/tests/test_trustregion_exact.py
""" Unit tests for trust-region iterative subproblem. To run it in its simplest form:: nosetests test_optimize.py """ import numpy as np from scipy.optimize._trustregion_exact import ( estimate_smallest_singular_value, singular_leading_submatrix, IterativeSubproblem) from scipy.linalg import (svd, get_lapack_funcs, det, qr, norm) from numpy.testing import (assert_array_equal, assert_equal, assert_array_almost_equal) def random_entry(n, min_eig, max_eig, case): # Generate random matrix rand = np.random.uniform(-1, 1, (n, n)) # QR decomposition Q, _, _ = qr(rand, pivoting='True') # Generate random eigenvalues eigvalues = np.random.uniform(min_eig, max_eig, n) eigvalues = np.sort(eigvalues)[::-1] # Generate matrix Qaux = np.multiply(eigvalues, Q) A = np.dot(Qaux, Q.T) # Generate gradient vector accordingly # to the case is being tested. if case == 'hard': g = np.zeros(n) g[:-1] = np.random.uniform(-1, 1, n-1) g = np.dot(Q, g) elif case == 'jac_equal_zero': g = np.zeros(n) else: g = np.random.uniform(-1, 1, n) return A, g class TestEstimateSmallestSingularValue: def test_for_ill_condiotioned_matrix(self): # Ill-conditioned triangular matrix C = np.array([[1, 2, 3, 4], [0, 0.05, 60, 7], [0, 0, 0.8, 9], [0, 0, 0, 10]]) # Get svd decomposition U, s, Vt = svd(C) # Get smallest singular value and correspondent right singular vector. smin_svd = s[-1] zmin_svd = Vt[-1, :] # Estimate smallest singular value smin, zmin = estimate_smallest_singular_value(C) # Check the estimation assert_array_almost_equal(smin, smin_svd, decimal=8) assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8) class TestSingularLeadingSubmatrix: def test_for_already_singular_leading_submatrix(self): # Define test matrix A. # Note that the leading 2x2 submatrix is singular. A = np.array([[1, 2, 3], [2, 4, 5], [3, 5, 6]]) # Get Cholesky from lapack functions cholesky, = get_lapack_funcs(('potrf',), (A,)) # Compute Cholesky Decomposition c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) delta, v = singular_leading_submatrix(A, c, k) A[k-1, k-1] += delta # Check if the leading submatrix is singular. assert_array_almost_equal(det(A[:k, :k]), 0) # Check if `v` fullfil the specified properties quadratic_term = np.dot(v, np.dot(A, v)) assert_array_almost_equal(quadratic_term, 0) def test_for_simetric_indefinite_matrix(self): # Define test matrix A. # Note that the leading 5x5 submatrix is indefinite. A = np.asarray([[1, 2, 3, 7, 8], [2, 5, 5, 9, 0], [3, 5, 11, 1, 2], [7, 9, 1, 7, 5], [8, 0, 2, 5, 8]]) # Get Cholesky from lapack functions cholesky, = get_lapack_funcs(('potrf',), (A,)) # Compute Cholesky Decomposition c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) delta, v = singular_leading_submatrix(A, c, k) A[k-1, k-1] += delta # Check if the leading submatrix is singular. assert_array_almost_equal(det(A[:k, :k]), 0) # Check if `v` fullfil the specified properties quadratic_term = np.dot(v, np.dot(A, v)) assert_array_almost_equal(quadratic_term, 0) def test_for_first_element_equal_to_zero(self): # Define test matrix A. # Note that the leading 2x2 submatrix is singular. 
A = np.array([[0, 3, 11], [3, 12, 5], [11, 5, 6]]) # Get Cholesky from lapack functions cholesky, = get_lapack_funcs(('potrf',), (A,)) # Compute Cholesky Decomposition c, k = cholesky(A, lower=False, overwrite_a=False, clean=True) delta, v = singular_leading_submatrix(A, c, k) A[k-1, k-1] += delta # Check if the leading submatrix is singular assert_array_almost_equal(det(A[:k, :k]), 0) # Check if `v` fullfil the specified properties quadratic_term = np.dot(v, np.dot(A, v)) assert_array_almost_equal(quadratic_term, 0) class TestIterativeSubproblem: def test_for_the_easy_case(self): # `H` is chosen such that `g` is not orthogonal to the # eigenvector associated with the smallest eigenvalue `s`. H = [[10, 2, 3, 4], [2, 1, 7, 1], [3, 7, 1, 7], [4, 1, 7, 2]] g = [1, 1, 1, 1] # Trust Radius trust_radius = 1 # Solve Subproblem subprob = IterativeSubproblem(x=0, fun=lambda x: 0, jac=lambda x: np.array(g), hess=lambda x: np.array(H), k_easy=1e-10, k_hard=1e-10) p, hits_boundary = subprob.solve(trust_radius) assert_array_almost_equal(p, [0.00393332, -0.55260862, 0.67065477, -0.49480341]) assert_array_almost_equal(hits_boundary, True) def test_for_the_hard_case(self): # `H` is chosen such that `g` is orthogonal to the # eigenvector associated with the smallest eigenvalue `s`. H = [[10, 2, 3, 4], [2, 1, 7, 1], [3, 7, 1, 7], [4, 1, 7, 2]] g = [6.4852641521327437, 1, 1, 1] s = -8.2151519874416614 # Trust Radius trust_radius = 1 # Solve Subproblem subprob = IterativeSubproblem(x=0, fun=lambda x: 0, jac=lambda x: np.array(g), hess=lambda x: np.array(H), k_easy=1e-10, k_hard=1e-10) p, hits_boundary = subprob.solve(trust_radius) assert_array_almost_equal(-s, subprob.lambda_current) def test_for_interior_convergence(self): H = [[1.812159, 0.82687265, 0.21838879, -0.52487006, 0.25436988], [0.82687265, 2.66380283, 0.31508988, -0.40144163, 0.08811588], [0.21838879, 0.31508988, 2.38020726, -0.3166346, 0.27363867], [-0.52487006, -0.40144163, -0.3166346, 1.61927182, -0.42140166], [0.25436988, 0.08811588, 0.27363867, -0.42140166, 1.33243101]] g = [0.75798952, 0.01421945, 0.33847612, 0.83725004, -0.47909534] # Solve Subproblem subprob = IterativeSubproblem(x=0, fun=lambda x: 0, jac=lambda x: np.array(g), hess=lambda x: np.array(H)) p, hits_boundary = subprob.solve(1.1) assert_array_almost_equal(p, [-0.68585435, 0.1222621, -0.22090999, -0.67005053, 0.31586769]) assert_array_almost_equal(hits_boundary, False) assert_array_almost_equal(subprob.lambda_current, 0) assert_array_almost_equal(subprob.niter, 1) def test_for_jac_equal_zero(self): H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], [-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] g = [0, 0, 0, 0, 0] # Solve Subproblem subprob = IterativeSubproblem(x=0, fun=lambda x: 0, jac=lambda x: np.array(g), hess=lambda x: np.array(H), k_easy=1e-10, k_hard=1e-10) p, hits_boundary = subprob.solve(1.1) assert_array_almost_equal(p, [0.06910534, -0.01432721, -0.65311947, -0.23815972, -0.84954934]) assert_array_almost_equal(hits_boundary, True) def test_for_jac_very_close_to_zero(self): H = [[0.88547534, 2.90692271, 0.98440885, -0.78911503, -0.28035809], [2.90692271, -0.04618819, 0.32867263, -0.83737945, 0.17116396], [0.98440885, 0.32867263, -0.87355957, -0.06521957, -1.43030957], [-0.78911503, -0.83737945, -0.06521957, -1.645709, -0.33887298], 
[-0.28035809, 0.17116396, -1.43030957, -0.33887298, -1.68586978]] g = [0, 0, 0, 0, 1e-15] # Solve Subproblem subprob = IterativeSubproblem(x=0, fun=lambda x: 0, jac=lambda x: np.array(g), hess=lambda x: np.array(H), k_easy=1e-10, k_hard=1e-10) p, hits_boundary = subprob.solve(1.1) assert_array_almost_equal(p, [0.06910534, -0.01432721, -0.65311947, -0.23815972, -0.84954934]) assert_array_almost_equal(hits_boundary, True) def test_for_random_entries(self): # Seed np.random.seed(1) # Dimension n = 5 for case in ('easy', 'hard', 'jac_equal_zero'): eig_limits = [(-20, -15), (-10, -5), (-10, 0), (-5, 5), (-10, 10), (0, 10), (5, 10), (15, 20)] for min_eig, max_eig in eig_limits: # Generate random symmetric matrix H with # eigenvalues between min_eig and max_eig. H, g = random_entry(n, min_eig, max_eig, case) # Trust radius trust_radius_list = [0.1, 0.3, 0.6, 0.8, 1, 1.2, 3.3, 5.5, 10] for trust_radius in trust_radius_list: # Solve subproblem with very high accuracy subprob_ac = IterativeSubproblem(0, lambda x: 0, lambda x: g, lambda x: H, k_easy=1e-10, k_hard=1e-10) p_ac, hits_boundary_ac = subprob_ac.solve(trust_radius) # Compute objective function value J_ac = 1/2*np.dot(p_ac, np.dot(H, p_ac))+np.dot(g, p_ac) stop_criteria = [(0.1, 2), (0.5, 1.1), (0.9, 1.01)] for k_opt, k_trf in stop_criteria: # k_easy and k_hard computed in function # of k_opt and k_trf accordingly to # Conn, A. R., Gould, N. I., & Toint, P. L. (2000). # "Trust region methods". Siam. p. 197. k_easy = min(k_trf-1, 1-np.sqrt(k_opt)) k_hard = 1-k_opt # Solve subproblem subprob = IterativeSubproblem(0, lambda x: 0, lambda x: g, lambda x: H, k_easy=k_easy, k_hard=k_hard) p, hits_boundary = subprob.solve(trust_radius) # Compute objective function value J = 1/2*np.dot(p, np.dot(H, p))+np.dot(g, p) # Check if it respect k_trf if hits_boundary: assert_array_equal(np.abs(norm(p)-trust_radius) <= (k_trf-1)*trust_radius, True) else: assert_equal(norm(p) <= trust_radius, True) # Check if it respect k_opt assert_equal(J <= k_opt*J_ac, True)
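# --- Editorial sketch (not part of the scipy test suite) --------------------
# The tests above solve the exact trust-region subproblem
#     minimize 0.5 * p.T @ H @ p + g.T @ p   subject to   ||p|| <= radius.
# A minimal sketch that constructs the (private) IterativeSubproblem exactly
# as the tests do, with a small hypothetical H and g, and then checks the
# boundary optimality condition (H + lambda*I) p = -g with lambda >= 0:
import numpy as np
from scipy.optimize._trustregion_exact import IterativeSubproblem

_H_demo = np.array([[5.0, 1.0],
                    [1.0, 3.0]])
_g_demo = np.array([1.0, -2.0])
_radius = 0.5

_sub = IterativeSubproblem(x=0, fun=lambda x: 0,
                           jac=lambda x: _g_demo,
                           hess=lambda x: _H_demo,
                           k_easy=1e-10, k_hard=1e-10)
_p, _hits_boundary = _sub.solve(_radius)

# The step stays (numerically) inside the trust region.
assert np.linalg.norm(_p) <= _radius * (1 + 1e-8)
if _hits_boundary:
    # On the boundary, the shifted Newton system characterizes the solution.
    _lam = _sub.lambda_current
    _residual = (_H_demo + _lam * np.eye(2)) @ _p + _g_demo
    assert np.linalg.norm(_residual) < 1e-6 * np.linalg.norm(_g_demo)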
12,954
35.699717
78
py
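The IterativeSubproblem tests in the file above repeatedly evaluate the quadratic trust-region model J(p) = 1/2 p^T H p + g^T p and check whether a candidate step lies on the trust-region boundary within the tolerance k_trf. Below is a minimal NumPy-only sketch of those two checks under illustrative data; the step p here is just a scaled steepest-descent direction chosen for the example, not the output of the private solver exercised by the tests.

import numpy as np
from numpy.linalg import norm

# Illustrative data only: a small symmetric Hessian, a gradient, and a trust radius.
H = np.array([[10., 2., 3., 4.],
              [2., 1., 7., 1.],
              [3., 7., 1., 7.],
              [4., 1., 7., 2.]])
g = np.array([1., 1., 1., 1.])
trust_radius = 1.0

# A candidate step for the example: scaled steepest descent (it lands on the boundary).
p = -trust_radius * g / norm(g)

# Quadratic model value, the quantity compared between the high-accuracy and
# loose-tolerance solves in test_for_random_entries.
J = 0.5 * p @ H @ p + g @ p

# Boundary check used when hits_boundary is True:
# abs(||p|| - trust_radius) must stay within (k_trf - 1) * trust_radius.
k_trf = 1.1
within_band = abs(norm(p) - trust_radius) <= (k_trf - 1) * trust_radius
print(J, within_band)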
scipy
scipy-main/scipy/optimize/tests/test_milp.py
""" Unit test for Mixed Integer Linear Programming """ import re import numpy as np from numpy.testing import assert_allclose, assert_array_equal import pytest from .test_linprog import magic_square from scipy.optimize import milp, Bounds, LinearConstraint from scipy import sparse def test_milp_iv(): message = "`c` must be a dense array" with pytest.raises(ValueError, match=message): milp(sparse.coo_array([0, 0])) message = "`c` must be a one-dimensional array of finite numbers with" with pytest.raises(ValueError, match=message): milp(np.zeros((3, 4))) with pytest.raises(ValueError, match=message): milp([]) with pytest.raises(ValueError, match=message): milp(None) message = "`bounds` must be convertible into an instance of..." with pytest.raises(ValueError, match=message): milp(1, bounds=10) message = "`constraints` (or each element within `constraints`) must be" with pytest.raises(ValueError, match=re.escape(message)): milp(1, constraints=10) with pytest.raises(ValueError, match=re.escape(message)): milp(np.zeros(3), constraints=([[1, 2, 3]], [2, 3], [2, 3])) with pytest.raises(ValueError, match=re.escape(message)): milp(np.zeros(2), constraints=([[1, 2]], [2], sparse.coo_array([2]))) message = "The shape of `A` must be (len(b_l), len(c))." with pytest.raises(ValueError, match=re.escape(message)): milp(np.zeros(3), constraints=([[1, 2]], [2], [2])) message = "`integrality` must be a dense array" with pytest.raises(ValueError, match=message): milp([1, 2], integrality=sparse.coo_array([1, 2])) message = ("`integrality` must contain integers 0-3 and be broadcastable " "to `c.shape`.") with pytest.raises(ValueError, match=message): milp([1, 2, 3], integrality=[1, 2]) with pytest.raises(ValueError, match=message): milp([1, 2, 3], integrality=[1, 5, 3]) message = "Lower and upper bounds must be dense arrays." with pytest.raises(ValueError, match=message): milp([1, 2, 3], bounds=([1, 2], sparse.coo_array([3, 4]))) message = "`lb`, `ub`, and `keep_feasible` must be broadcastable." with pytest.raises(ValueError, match=message): milp([1, 2, 3], bounds=([1, 2], [3, 4, 5])) with pytest.raises(ValueError, match=message): milp([1, 2, 3], bounds=([1, 2, 3], [4, 5])) message = "`bounds.lb` and `bounds.ub` must contain reals and..." with pytest.raises(ValueError, match=message): milp([1, 2, 3], bounds=([1, 2], [3, 4])) with pytest.raises(ValueError, match=message): milp([1, 2, 3], bounds=([1, 2, 3], ["3+4", 4, 5])) with pytest.raises(ValueError, match=message): milp([1, 2, 3], bounds=([1, 2, 3], [set(), 4, 5])) @pytest.mark.xfail(run=False, reason="Needs to be fixed in `_highs_wrapper`") def test_milp_options(capsys): # run=False now because of gh-16347 message = "Unrecognized options detected: {'ekki'}..." options = {'ekki': True} with pytest.warns(RuntimeWarning, match=message): milp(1, options=options) A, b, c, numbers, M = magic_square(3) options = {"disp": True, "presolve": False, "time_limit": 0.05} res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1, options=options) captured = capsys.readouterr() assert "Presolve is switched off" in captured.out assert "Time Limit Reached" in captured.out assert not res.success def test_result(): A, b, c, numbers, M = magic_square(3) res = milp(c=c, constraints=(A, b, b), bounds=(0, 1), integrality=1) assert res.status == 0 assert res.success msg = "Optimization terminated successfully. 
(HiGHS Status 7:" assert res.message.startswith(msg) assert isinstance(res.x, np.ndarray) assert isinstance(res.fun, float) assert isinstance(res.mip_node_count, int) assert isinstance(res.mip_dual_bound, float) assert isinstance(res.mip_gap, float) A, b, c, numbers, M = magic_square(6) res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1, options={'time_limit': 0.05}) assert res.status == 1 assert not res.success msg = "Time limit reached. (HiGHS Status 13:" assert res.message.startswith(msg) assert (res.fun is res.mip_dual_bound is res.mip_gap is res.mip_node_count is res.x is None) res = milp(1, bounds=(1, -1)) assert res.status == 2 assert not res.success msg = "The problem is infeasible. (HiGHS Status 8:" assert res.message.startswith(msg) assert (res.fun is res.mip_dual_bound is res.mip_gap is res.mip_node_count is res.x is None) res = milp(-1) assert res.status == 3 assert not res.success msg = "The problem is unbounded. (HiGHS Status 10:" assert res.message.startswith(msg) assert (res.fun is res.mip_dual_bound is res.mip_gap is res.mip_node_count is res.x is None) def test_milp_optional_args(): # check that arguments other than `c` are indeed optional res = milp(1) assert res.fun == 0 assert_array_equal(res.x, [0]) def test_milp_1(): # solve magic square problem n = 3 A, b, c, numbers, M = magic_square(n) A = sparse.csc_array(A) # confirm that sparse arrays are accepted res = milp(c=c*0, constraints=(A, b, b), bounds=(0, 1), integrality=1) # check that solution is a magic square x = np.round(res.x) s = (numbers.flatten() * x).reshape(n**2, n, n) square = np.sum(s, axis=0) np.testing.assert_allclose(square.sum(axis=0), M) np.testing.assert_allclose(square.sum(axis=1), M) np.testing.assert_allclose(np.diag(square).sum(), M) np.testing.assert_allclose(np.diag(square[:, ::-1]).sum(), M) def test_milp_2(): # solve MIP with inequality constraints and all integer constraints # source: slide 5, # https://www.cs.upc.edu/~erodri/webpage/cps/theory/lp/milp/slides.pdf # also check that `milp` accepts all valid ways of specifying constraints c = -np.ones(2) A = [[-2, 2], [-8, 10]] b_l = [1, -np.inf] b_u = [np.inf, 13] linear_constraint = LinearConstraint(A, b_l, b_u) # solve original problem res1 = milp(c=c, constraints=(A, b_l, b_u), integrality=True) res2 = milp(c=c, constraints=linear_constraint, integrality=True) res3 = milp(c=c, constraints=[(A, b_l, b_u)], integrality=True) res4 = milp(c=c, constraints=[linear_constraint], integrality=True) res5 = milp(c=c, integrality=True, constraints=[(A[:1], b_l[:1], b_u[:1]), (A[1:], b_l[1:], b_u[1:])]) res6 = milp(c=c, integrality=True, constraints=[LinearConstraint(A[:1], b_l[:1], b_u[:1]), LinearConstraint(A[1:], b_l[1:], b_u[1:])]) res7 = milp(c=c, integrality=True, constraints=[(A[:1], b_l[:1], b_u[:1]), LinearConstraint(A[1:], b_l[1:], b_u[1:])]) xs = np.array([res1.x, res2.x, res3.x, res4.x, res5.x, res6.x, res7.x]) funs = np.array([res1.fun, res2.fun, res3.fun, res4.fun, res5.fun, res6.fun, res7.fun]) np.testing.assert_allclose(xs, np.broadcast_to([1, 2], xs.shape)) np.testing.assert_allclose(funs, -3) # solve relaxed problem res = milp(c=c, constraints=(A, b_l, b_u)) np.testing.assert_allclose(res.x, [4, 4.5]) np.testing.assert_allclose(res.fun, -8.5) def test_milp_3(): # solve MIP with inequality constraints and all integer constraints # source: https://en.wikipedia.org/wiki/Integer_programming#Example c = [0, -1] A = [[-1, 1], [3, 2], [2, 3]] b_u = [1, 12, 12] b_l = np.full_like(b_u, -np.inf, dtype=np.float64) constraints 
= LinearConstraint(A, b_l, b_u) integrality = np.ones_like(c) # solve original problem res = milp(c=c, constraints=constraints, integrality=integrality) assert_allclose(res.fun, -2) # two optimal solutions possible, just need one of them assert np.allclose(res.x, [1, 2]) or np.allclose(res.x, [2, 2]) # solve relaxed problem res = milp(c=c, constraints=constraints) assert_allclose(res.fun, -2.8) assert_allclose(res.x, [1.8, 2.8]) def test_milp_4(): # solve MIP with inequality constraints and only one integer constraint # source: https://www.mathworks.com/help/optim/ug/intlinprog.html c = [8, 1] integrality = [0, 1] A = [[1, 2], [-4, -1], [2, 1]] b_l = [-14, -np.inf, -np.inf] b_u = [np.inf, -33, 20] constraints = LinearConstraint(A, b_l, b_u) bounds = Bounds(-np.inf, np.inf) res = milp(c, integrality=integrality, bounds=bounds, constraints=constraints) assert_allclose(res.fun, 59) assert_allclose(res.x, [6.5, 7]) def test_milp_5(): # solve MIP with inequality and equality constraints # source: https://www.mathworks.com/help/optim/ug/intlinprog.html c = [-3, -2, -1] integrality = [0, 0, 1] lb = [0, 0, 0] ub = [np.inf, np.inf, 1] bounds = Bounds(lb, ub) A = [[1, 1, 1], [4, 2, 1]] b_l = [-np.inf, 12] b_u = [7, 12] constraints = LinearConstraint(A, b_l, b_u) res = milp(c, integrality=integrality, bounds=bounds, constraints=constraints) # there are multiple solutions assert_allclose(res.fun, -12) @pytest.mark.slow @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job def test_milp_6(): # solve a larger MIP with only equality constraints # source: https://www.mathworks.com/help/optim/ug/intlinprog.html integrality = 1 A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], [39, 16, 22, 28, 26, 30, 23, 24], [18, 14, 29, 27, 30, 38, 26, 26], [41, 26, 28, 36, 18, 38, 16, 26]]) b_eq = np.array([7872, 10466, 11322, 12058]) c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) res = milp(c=c, constraints=(A_eq, b_eq, b_eq), integrality=integrality) np.testing.assert_allclose(res.fun, 1854) def test_infeasible_prob_16609(): # Ensure presolve does not mark trivially infeasible problems # as Optimal -- see gh-16609 c = [1.0, 0.0] integrality = [0, 1] lb = [0, -np.inf] ub = [np.inf, np.inf] bounds = Bounds(lb, ub) A_eq = [[0.0, 1.0]] b_eq = [0.5] constraints = LinearConstraint(A_eq, b_eq, b_eq) res = milp(c, integrality=integrality, bounds=bounds, constraints=constraints) np.testing.assert_equal(res.status, 2) _msg_time = "Time limit reached. (HiGHS Status 13:" _msg_iter = "Iteration limit reached. 
(HiGHS Status 14:" @pytest.mark.skipif(np.intp(0).itemsize < 8, reason="Unhandled 32-bit GCC FP bug") @pytest.mark.slow @pytest.mark.parametrize(["options", "msg"], [({"time_limit": 0.1}, _msg_time), ({"node_limit": 1}, _msg_iter)]) def test_milp_timeout_16545(options, msg): # Ensure solution is not thrown away if MILP solver times out # -- see gh-16545 rng = np.random.default_rng(5123833489170494244) A = rng.integers(0, 5, size=(100, 100)) b_lb = np.full(100, fill_value=-np.inf) b_ub = np.full(100, fill_value=25) constraints = LinearConstraint(A, b_lb, b_ub) variable_lb = np.zeros(100) variable_ub = np.ones(100) variable_bounds = Bounds(variable_lb, variable_ub) integrality = np.ones(100) c_vector = -np.ones(100) res = milp( c_vector, integrality=integrality, bounds=variable_bounds, constraints=constraints, options=options, ) assert res.message.startswith(msg) assert res["x"] is not None # ensure solution is feasible x = res["x"] tol = 1e-8 # sometimes needed due to finite numerical precision assert np.all(b_lb - tol <= A @ x) and np.all(A @ x <= b_ub + tol) assert np.all(variable_lb - tol <= x) and np.all(x <= variable_ub + tol) assert np.allclose(x, np.round(x)) def test_three_constraints_16878(): # `milp` failed when exactly three constraints were passed # Ensure that this is no longer the case. rng = np.random.default_rng(5123833489170494244) A = rng.integers(0, 5, size=(6, 6)) bl = np.full(6, fill_value=-np.inf) bu = np.full(6, fill_value=10) constraints = [LinearConstraint(A[:2], bl[:2], bu[:2]), LinearConstraint(A[2:4], bl[2:4], bu[2:4]), LinearConstraint(A[4:], bl[4:], bu[4:])] constraints2 = [(A[:2], bl[:2], bu[:2]), (A[2:4], bl[2:4], bu[2:4]), (A[4:], bl[4:], bu[4:])] lb = np.zeros(6) ub = np.ones(6) variable_bounds = Bounds(lb, ub) c = -np.ones(6) res1 = milp(c, bounds=variable_bounds, constraints=constraints) res2 = milp(c, bounds=variable_bounds, constraints=constraints2) ref = milp(c, bounds=variable_bounds, constraints=(A, bl, bu)) assert res1.success and res2.success assert_allclose(res1.x, ref.x) assert_allclose(res2.x, ref.x) @pytest.mark.xslow def test_mip_rel_gap_passdown(): # Solve problem with decreasing mip_gap to make sure mip_rel_gap decreases # Adapted from test_linprog::TestLinprogHiGHSMIP::test_mip_rel_gap_passdown # MIP taken from test_mip_6 above A_eq = np.array([[22, 13, 26, 33, 21, 3, 14, 26], [39, 16, 22, 28, 26, 30, 23, 24], [18, 14, 29, 27, 30, 38, 26, 26], [41, 26, 28, 36, 18, 38, 16, 26]]) b_eq = np.array([7872, 10466, 11322, 12058]) c = np.array([2, 10, 13, 17, 7, 5, 7, 3]) mip_rel_gaps = [0.25, 0.01, 0.001] sol_mip_gaps = [] for mip_rel_gap in mip_rel_gaps: res = milp(c=c, bounds=(0, np.inf), constraints=(A_eq, b_eq, b_eq), integrality=True, options={"mip_rel_gap": mip_rel_gap}) # assert that the solution actually has mip_gap lower than the # required mip_rel_gap supplied assert res.mip_gap <= mip_rel_gap # check that `res.mip_gap` is as defined in the documentation assert res.mip_gap == (res.fun - res.mip_dual_bound)/res.fun sol_mip_gaps.append(res.mip_gap) # make sure that the mip_rel_gap parameter is actually doing something # check that differences between solution gaps are declining # monotonically with the mip_rel_gap parameter. assert np.all(np.diff(sol_mip_gaps) < 0)
14,553
36.704663
79
py
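As a standalone illustration of the call pattern exercised by the tests above, here is a minimal sketch that re-solves the small integer program from test_milp_3 (the Wikipedia integer-programming example). It assumes a SciPy build that provides scipy.optimize.milp (SciPy >= 1.9).

import numpy as np
from scipy.optimize import milp, LinearConstraint

# Maximise x1 (milp minimises, hence the sign flip) subject to
# -x0 + x1 <= 1, 3*x0 + 2*x1 <= 12, 2*x0 + 3*x1 <= 12, with integer variables.
c = np.array([0, -1])
A = np.array([[-1, 1], [3, 2], [2, 3]])
b_u = np.array([1, 12, 12])
b_l = np.full(3, -np.inf)          # one-sided "<=" rows

res = milp(c=c, constraints=LinearConstraint(A, b_l, b_u),
           integrality=np.ones_like(c))

print(res.status, res.success)     # 0, True when an optimum is found
print(res.x, res.fun)              # an integral optimum with objective -2

Dropping the integrality argument gives the LP relaxation instead (objective -2.8 at x = [1.8, 2.8]), which is exactly the comparison test_milp_3 makes.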
scipy
scipy-main/scipy/optimize/tests/test__differential_evolution.py
""" Unit tests for the differential global minimization algorithm. """ import multiprocessing import platform from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver, _ConstraintWrapper) from scipy.optimize import differential_evolution from scipy.optimize._constraints import (Bounds, NonlinearConstraint, LinearConstraint) from scipy.optimize import rosen, minimize from scipy.sparse import csr_matrix from scipy import stats import numpy as np from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal, assert_string_equal, assert_, suppress_warnings) from pytest import raises as assert_raises, warns import pytest class TestDifferentialEvolutionSolver: def setup_method(self): self.old_seterr = np.seterr(invalid='raise') self.limits = np.array([[0., 0.], [2., 2.]]) self.bounds = [(0., 2.), (0., 2.)] self.dummy_solver = DifferentialEvolutionSolver(self.quadratic, [(0, 100)]) # dummy_solver2 will be used to test mutation strategies self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic, [(0, 1)], popsize=7, mutation=0.5) # create a population that's only 7 members long # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T self.dummy_solver2.population = population def teardown_method(self): np.seterr(**self.old_seterr) def quadratic(self, x): return x[0]**2 def test__strategy_resolves(self): # test that the correct mutation function is resolved by # different requested strategy arguments solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best1exp') assert_equal(solver.strategy, 'best1exp') assert_equal(solver.mutation_func.__name__, '_best1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best1bin') assert_equal(solver.strategy, 'best1bin') assert_equal(solver.mutation_func.__name__, '_best1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand1bin') assert_equal(solver.strategy, 'rand1bin') assert_equal(solver.mutation_func.__name__, '_rand1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand1exp') assert_equal(solver.strategy, 'rand1exp') assert_equal(solver.mutation_func.__name__, '_rand1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand2exp') assert_equal(solver.strategy, 'rand2exp') assert_equal(solver.mutation_func.__name__, '_rand2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best2bin') assert_equal(solver.strategy, 'best2bin') assert_equal(solver.mutation_func.__name__, '_best2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand2bin') assert_equal(solver.strategy, 'rand2bin') assert_equal(solver.mutation_func.__name__, '_rand2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand2exp') assert_equal(solver.strategy, 'rand2exp') assert_equal(solver.mutation_func.__name__, '_rand2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='randtobest1bin') assert_equal(solver.strategy, 'randtobest1bin') assert_equal(solver.mutation_func.__name__, '_randtobest1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='randtobest1exp') assert_equal(solver.strategy, 'randtobest1exp') assert_equal(solver.mutation_func.__name__, '_randtobest1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='currenttobest1bin') assert_equal(solver.strategy, 'currenttobest1bin') assert_equal(solver.mutation_func.__name__, '_currenttobest1') solver = DifferentialEvolutionSolver(rosen, self.bounds, 
strategy='currenttobest1exp') assert_equal(solver.strategy, 'currenttobest1exp') assert_equal(solver.mutation_func.__name__, '_currenttobest1') def test__mutate1(self): # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc. result = np.array([0.05]) trial = self.dummy_solver2._best1((2, 3, 4, 5, 6)) assert_allclose(trial, result) result = np.array([0.25]) trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6)) assert_allclose(trial, result) def test__mutate2(self): # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc. # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] result = np.array([-0.1]) trial = self.dummy_solver2._best2((2, 3, 4, 5, 6)) assert_allclose(trial, result) result = np.array([0.1]) trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6)) assert_allclose(trial, result) def test__randtobest1(self): # strategies randtobest/1/* result = np.array([0.15]) trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6)) assert_allclose(trial, result) def test__currenttobest1(self): # strategies currenttobest/1/* result = np.array([0.1]) trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6)) assert_allclose(trial, result) def test_can_init_with_dithering(self): mutation = (0.5, 1) solver = DifferentialEvolutionSolver(self.quadratic, self.bounds, mutation=mutation) assert_equal(solver.dither, list(mutation)) def test_invalid_mutation_values_arent_accepted(self): func = rosen mutation = (0.5, 3) assert_raises(ValueError, DifferentialEvolutionSolver, func, self.bounds, mutation=mutation) mutation = (-1, 1) assert_raises(ValueError, DifferentialEvolutionSolver, func, self.bounds, mutation=mutation) mutation = (0.1, np.nan) assert_raises(ValueError, DifferentialEvolutionSolver, func, self.bounds, mutation=mutation) mutation = 0.5 solver = DifferentialEvolutionSolver(func, self.bounds, mutation=mutation) assert_equal(0.5, solver.scale) assert_equal(None, solver.dither) def test_invalid_functional(self): def func(x): return np.array([np.sum(x ** 2), np.sum(x)]) with assert_raises( RuntimeError, match=r"func\(x, \*args\) must return a scalar value"): differential_evolution(func, [(-2, 2), (-2, 2)]) def test__scale_parameters(self): trial = np.array([0.3]) assert_equal(30, self.dummy_solver._scale_parameters(trial)) # it should also work with the limits reversed self.dummy_solver.limits = np.array([[100], [0.]]) assert_equal(30, self.dummy_solver._scale_parameters(trial)) def test__unscale_parameters(self): trial = np.array([30]) assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) # it should also work with the limits reversed self.dummy_solver.limits = np.array([[100], [0.]]) assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) def test_equal_bounds(self): with np.errstate(invalid='raise'): solver = DifferentialEvolutionSolver( self.quadratic, bounds=[(2.0, 2.0), (1.0, 3.0)] ) v = solver._unscale_parameters([2.0, 2.0]) assert_allclose(v, 0.5) res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)]) assert_equal(res.x, [2.0, 3.0]) def test__ensure_constraint(self): trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001]) self.dummy_solver._ensure_constraint(trial) assert_equal(trial[2], 0.9) assert_(np.logical_and(trial >= 0, trial <= 1).all()) def test_differential_evolution(self): # test that the Jmin of DifferentialEvolutionSolver # is the same as the function evaluation solver = DifferentialEvolutionSolver( self.quadratic, [(-2, 2)], maxiter=1, polish=False ) result = solver.solve() assert_equal(result.fun, self.quadratic(result.x)) solver = DifferentialEvolutionSolver( 
self.quadratic, [(-2, 2)], maxiter=1, polish=True ) result = solver.solve() assert_equal(result.fun, self.quadratic(result.x)) def test_best_solution_retrieval(self): # test that the getter property method for the best solution works. solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) result = solver.solve() assert_equal(result.x, solver.x) def test_callback_terminates(self): # test that if the callback returns true, then the minimization halts bounds = [(0, 2), (0, 2)] expected_msg = 'callback function requested stop early by returning True' def callback_python_true(param, convergence=0.): return True result = differential_evolution(rosen, bounds, callback=callback_python_true) assert_string_equal(result.message, expected_msg) def callback_evaluates_true(param, convergence=0.): # DE should stop if bool(self.callback) is True return [10] result = differential_evolution(rosen, bounds, callback=callback_evaluates_true) assert_string_equal(result.message, expected_msg) def callback_evaluates_false(param, convergence=0.): return [] result = differential_evolution(rosen, bounds, callback=callback_evaluates_false) assert result.success def test_args_tuple_is_passed(self): # test that the args tuple is passed to the cost function properly. bounds = [(-10, 10)] args = (1., 2., 3.) def quadratic(x, *args): if type(args) != tuple: raise ValueError('args should be a tuple') return args[0] + args[1] * x + args[2] * x**2. result = differential_evolution(quadratic, bounds, args=args, polish=True) assert_almost_equal(result.fun, 2 / 3.) def test_init_with_invalid_strategy(self): # test that passing an invalid strategy raises ValueError func = rosen bounds = [(-3, 3)] assert_raises(ValueError, differential_evolution, func, bounds, strategy='abc') def test_bounds_checking(self): # test that the bounds checking works func = rosen bounds = [(-3)] assert_raises(ValueError, differential_evolution, func, bounds) bounds = [(-3, 3), (3, 4, 5)] assert_raises(ValueError, differential_evolution, func, bounds) # test that we can use a new-type Bounds object result = differential_evolution(rosen, Bounds([0, 0], [2, 2])) assert_almost_equal(result.x, (1., 1.)) def test_select_samples(self): # select_samples should return 5 separate random numbers. limits = np.arange(12., dtype='float64').reshape(2, 6) bounds = list(zip(limits[0, :], limits[1, :])) solver = DifferentialEvolutionSolver(None, bounds, popsize=1) candidate = 0 r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5) assert_equal( len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6) def test_maxiter_stops_solve(self): # test that if the maximum number of iterations is exceeded # the solver stops. solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1) result = solver.solve() assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of iterations has been exceeded.') def test_maxfun_stops_solve(self): # test that if the maximum number of function evaluations is exceeded # during initialisation the solver stops solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1, polish=False) result = solver.solve() assert_equal(result.nfev, 2) assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of function evaluations has ' 'been exceeded.') # test that if the maximum number of function evaluations is exceeded # during the actual minimisation, then the solver stops. # Have to turn polishing off, as this will still occur even if maxfun # is reached. 
For popsize=5 and len(bounds)=2, then there are only 10 # function evaluations during initialisation. solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=5, polish=False, maxfun=40) result = solver.solve() assert_equal(result.nfev, 41) assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of function evaluations has ' 'been exceeded.') # now repeat for updating='deferred version # 47 function evaluations is not a multiple of the population size, # so maxfun is reached partway through a population evaluation. solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=5, polish=False, maxfun=47, updating='deferred') result = solver.solve() assert_equal(result.nfev, 47) assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of function evaluations has ' 'been reached.') def test_quadratic(self): # test the quadratic function from object solver = DifferentialEvolutionSolver(self.quadratic, [(-100, 100)], tol=0.02) solver.solve() assert_equal(np.argmin(solver.population_energies), 0) def test_quadratic_from_diff_ev(self): # test the quadratic function from differential_evolution function differential_evolution(self.quadratic, [(-100, 100)], tol=0.02) def test_seed_gives_repeatability(self): result = differential_evolution(self.quadratic, [(-100, 100)], polish=False, seed=1, tol=0.5) result2 = differential_evolution(self.quadratic, [(-100, 100)], polish=False, seed=1, tol=0.5) assert_equal(result.x, result2.x) assert_equal(result.nfev, result2.nfev) def test_random_generator(self): # check that np.random.Generator can be used (numpy >= 1.17) # obtain a np.random.Generator object rng = np.random.default_rng() inits = ['random', 'latinhypercube', 'sobol', 'halton'] for init in inits: differential_evolution(self.quadratic, [(-100, 100)], polish=False, seed=rng, tol=0.5, init=init) def test_exp_runs(self): # test whether exponential mutation loop runs solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best1exp', maxiter=1) solver.solve() def test_gh_4511_regression(self): # This modification of the differential evolution docstring example # uses a custom popsize that had triggered an off-by-one error. # Because we do not care about solving the optimization problem in # this test, we use maxiter=1 to reduce the testing time. bounds = [(-5, 5), (-5, 5)] # result = differential_evolution(rosen, bounds, popsize=1815, # maxiter=1) # the original issue arose because of rounding error in arange, with # linspace being a much better solution. 1815 is quite a large popsize # to use and results in a long test time (~13s). I used the original # issue to figure out the lowest number of samples that would cause # this rounding error to occur, 49. differential_evolution(rosen, bounds, popsize=49, maxiter=1) def test_calculate_population_energies(self): # if popsize is 3, then the overall generation has size (6,) solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3) solver._calculate_population_energies(solver.population) solver._promote_lowest_energy() assert_equal(np.argmin(solver.population_energies), 0) # initial calculation of the energies should require 6 nfev. 
assert_equal(solver._nfev, 6) def test_iteration(self): # test that DifferentialEvolutionSolver is iterable # if popsize is 3, then the overall generation has size (6,) solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3, maxfun=12) x, fun = next(solver) assert_equal(np.size(x, 0), 2) # 6 nfev are required for initial calculation of energies, 6 nfev are # required for the evolution of the 6 population members. assert_equal(solver._nfev, 12) # the next generation should halt because it exceeds maxfun assert_raises(StopIteration, next, solver) # check a proper minimisation can be done by an iterable solver solver = DifferentialEvolutionSolver(rosen, self.bounds) _, fun_prev = next(solver) for i, soln in enumerate(solver): x_current, fun_current = soln assert fun_prev >= fun_current _, fun_prev = x_current, fun_current # need to have this otherwise the solver would never stop. if i == 50: break def test_convergence(self): solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2, polish=False) solver.solve() assert_(solver.convergence < 0.2) def test_maxiter_none_GH5731(self): # Pre 0.17 the previous default for maxiter and maxfun was None. # the numerical defaults are now 1000 and np.inf. However, some scripts # will still supply None for both of those, this will raise a TypeError # in the solve method. solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None, maxfun=None) solver.solve() def test_population_initiation(self): # test the different modes of population initiation # init must be either 'latinhypercube' or 'random' # raising ValueError is something else is passed in assert_raises(ValueError, DifferentialEvolutionSolver, *(rosen, self.bounds), **{'init': 'rubbish'}) solver = DifferentialEvolutionSolver(rosen, self.bounds) # check that population initiation: # 1) resets _nfev to 0 # 2) all population energies are np.inf solver.init_population_random() assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) solver.init_population_lhs() assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) solver.init_population_qmc(qmc_engine='halton') assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol') solver.init_population_qmc(qmc_engine='sobol') assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) # we should be able to initialize with our own array population = np.linspace(-1, 3, 10).reshape(5, 2) solver = DifferentialEvolutionSolver(rosen, self.bounds, init=population, strategy='best2bin', atol=0.01, seed=1, popsize=5) assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) assert_(solver.num_population_members == 5) assert_(solver.population_shape == (5, 2)) # check that the population was initialized correctly unscaled_population = np.clip(solver._unscale_parameters(population), 0, 1) assert_almost_equal(solver.population[:5], unscaled_population) # population values need to be clipped to bounds assert_almost_equal(np.min(solver.population[:5]), 0) assert_almost_equal(np.max(solver.population[:5]), 1) # shouldn't be able to initialize with an array if it's the wrong shape # this would have too many parameters population = np.linspace(-1, 3, 15).reshape(5, 3) assert_raises(ValueError, DifferentialEvolutionSolver, *(rosen, self.bounds), **{'init': population}) # provide an initial solution # bounds are [(0, 2), (0, 2)] x0 = 
np.random.uniform(low=0.0, high=2.0, size=2) solver = DifferentialEvolutionSolver( rosen, self.bounds, x0=x0 ) # parameters are scaled to unit interval assert_allclose(solver.population[0], x0 / 2.0) def test_x0(self): # smoke test that checks that x0 is usable. res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8]) assert res.success # check what happens if some of the x0 lay outside the bounds with assert_raises(ValueError): differential_evolution(rosen, self.bounds, x0=[0.2, 2.1]) def test_infinite_objective_function(self): # Test that there are no problems if the objective function # returns inf on some runs def sometimes_inf(x): if x[0] < .5: return np.inf return x[1] bounds = [(0, 1), (0, 1)] differential_evolution(sometimes_inf, bounds=bounds, disp=False) def test_deferred_updating(self): # check setting of deferred updating, with default workers bounds = [(0., 2.), (0., 2.)] solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred') assert_(solver._updating == 'deferred') assert_(solver._mapwrapper._mapfunc is map) solver.solve() def test_immediate_updating(self): # check setting of immediate updating, with default workers bounds = [(0., 2.), (0., 2.)] solver = DifferentialEvolutionSolver(rosen, bounds) assert_(solver._updating == 'immediate') # should raise a UserWarning because the updating='immediate' # is being overridden by the workers keyword with warns(UserWarning): with DifferentialEvolutionSolver(rosen, bounds, workers=2) as solver: pass assert_(solver._updating == 'deferred') def test_parallel(self): # smoke test for parallelization with deferred updating bounds = [(0., 2.), (0., 2.)] with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver( rosen, bounds, updating='deferred', workers=p.map) as solver: assert_(solver._mapwrapper.pool is not None) assert_(solver._updating == 'deferred') solver.solve() with DifferentialEvolutionSolver(rosen, bounds, updating='deferred', workers=2) as solver: assert_(solver._mapwrapper.pool is not None) assert_(solver._updating == 'deferred') solver.solve() def test_converged(self): solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)]) solver.solve() assert_(solver.converged()) def test_constraint_violation_fn(self): def constr_f(x): return [x[0] + x[1]] def constr_f2(x): return np.array([x[0]**2 + x[1], x[0] - x[1]]) nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) cv = solver._constraint_violation_fn(np.array([1.0, 1.0])) assert_almost_equal(cv, 0.1) nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc, nlc2)) # for multiple constraints the constraint violations should # be concatenated. xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)] vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)] for x, v in zip(xs, vs): cv = solver._constraint_violation_fn(np.array(x)) assert_allclose(cv, np.atleast_2d(v)) # vectorized calculation of a series of solutions assert_allclose( solver._constraint_violation_fn(np.array(xs)), np.array(vs) ) # the following line is used in _calculate_population_feasibilities. # _constraint_violation_fn returns an (1, M) array when # x.shape == (N,), i.e. a single solution. Therefore this list # comprehension should generate (S, 1, M) array. 
constraint_violation = np.array([solver._constraint_violation_fn(x) for x in np.array(xs)]) assert constraint_violation.shape == (3, 1, 3) # we need reasonable error messages if the constraint function doesn't # return the right thing def constr_f3(x): # returns (S, M), rather than (M, S) return constr_f2(x).T nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc, nlc2), vectorized=False) solver.vectorized = True with pytest.raises( RuntimeError, match="An array returned from a Constraint" ): solver._constraint_violation_fn(np.array(xs)) def test_constraint_population_feasibilities(self): def constr_f(x): return [x[0] + x[1]] def constr_f2(x): return [x[0]**2 + x[1], x[0] - x[1]] nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) # are population feasibilities correct # [0.5, 0.5] corresponds to scaled values of [1., 1.] feas, cv = solver._calculate_population_feasibilities( np.array([[0.5, 0.5], [1., 1.]])) assert_equal(feas, [False, False]) assert_almost_equal(cv, np.array([[0.1], [2.1]])) assert cv.shape == (2, 1) nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) for vectorize in [False, True]: solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc, nlc2), vectorized=vectorize, updating='deferred') feas, cv = solver._calculate_population_feasibilities( np.array([[0.5, 0.5], [0.6, 0.5]])) assert_equal(feas, [False, False]) assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]])) feas, cv = solver._calculate_population_feasibilities( np.array([[0.5, 0.5], [1., 1.]])) assert_equal(feas, [False, False]) assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]])) assert cv.shape == (2, 3) feas, cv = solver._calculate_population_feasibilities( np.array([[0.25, 0.25], [1., 1.]])) assert_equal(feas, [True, False]) assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]])) assert cv.shape == (2, 3) def test_constraint_solve(self): def constr_f(x): return np.array([x[0] + x[1]]) nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) # trust-constr warns if the constraint function is linear with warns(UserWarning): res = solver.solve() assert constr_f(res.x) <= 1.9 assert res.success def test_impossible_constraint(self): def constr_f(x): return np.array([x[0] + x[1]]) nlc = NonlinearConstraint(constr_f, -np.inf, -1) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc), popsize=3, seed=1) # a UserWarning is issued because the 'trust-constr' polishing is # attempted on the least infeasible solution found. with warns(UserWarning): res = solver.solve() assert res.maxcv > 0 assert not res.success # test _promote_lowest_energy works when none of the population is # feasible. In this case, the solution with the lowest constraint # violation should be promoted. 
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc), polish=False) next(solver) assert not solver.feasible.all() assert not np.isfinite(solver.population_energies).all() # now swap two of the entries in the population l = 20 cv = solver.constraint_violation[0] solver.population_energies[[0, l]] = solver.population_energies[[l, 0]] solver.population[[0, l], :] = solver.population[[l, 0], :] solver.constraint_violation[[0, l], :] = ( solver.constraint_violation[[l, 0], :]) solver._promote_lowest_energy() assert_equal(solver.constraint_violation[0], cv) def test_accept_trial(self): # _accept_trial(self, energy_trial, feasible_trial, cv_trial, # energy_orig, feasible_orig, cv_orig) def constr_f(x): return [x[0] + x[1]] nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) fn = solver._accept_trial # both solutions are feasible, select lower energy assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.])) assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False) assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.])) # trial is feasible, original is not assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.])) # trial and original are infeasible # cv_trial have to be <= cv_original to be better assert (fn(0.1, False, np.array([0.5, 0.5]), 1.0, False, np.array([1., 1.0]))) assert (fn(0.1, False, np.array([0.5, 0.5]), 1.0, False, np.array([1., 0.50]))) assert (fn(1.0, False, np.array([0.5, 0.5]), 1.0, False, np.array([1.0, 0.4])) is False) def test_constraint_wrapper(self): lb = np.array([0, 20, 30]) ub = np.array([0.5, np.inf, 70]) x0 = np.array([1, 2, 3]) pc = _ConstraintWrapper(Bounds(lb, ub), x0) assert (pc.violation(x0) > 0).any() assert (pc.violation([0.25, 21, 31]) == 0).all() # check vectorized Bounds constraint xs = np.arange(1, 16).reshape(5, 3) violations = [] for x in xs: violations.append(pc.violation(x)) np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T) x0 = np.array([1, 2, 3, 4]) A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0) assert (pc.violation(x0) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() # check vectorized LinearConstraint, for 7 lots of parameter vectors # with each parameter vector being 4 long, with 3 constraints # xs is the same shape as stored in the differential evolution # population, but it's sent to the violation function as (len(x), M) xs = np.arange(1, 29).reshape(7, 4) violations = [] for x in xs: violations.append(pc.violation(x)) np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T) pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0), x0) assert (pc.violation(x0) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() def fun(x): return A.dot(x) nonlinear = NonlinearConstraint(fun, -np.inf, 0) pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4]) assert (pc.violation(x0) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() def test_constraint_wrapper_violation(self): def cons_f(x): # written in vectorised form to accept an array of (N, S) # returning (M, S) # where N is the number of parameters, # S is the number of solution vectors to be examined, # and M is the number of constraint components return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]]) nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) pc = _ConstraintWrapper(nlc, [0.5, 1]) assert 
np.size(pc.bounds[0]) == 2 xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)] vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)] for x, v in zip(xs, vs): assert_allclose(pc.violation(x), v) # now check that we can vectorize the constraint wrapper assert_allclose(pc.violation(np.array(xs).T), np.array(vs).T) assert pc.fun(np.array(xs).T).shape == (2, len(xs)) assert pc.violation(np.array(xs).T).shape == (2, len(xs)) assert pc.num_constr == 2 assert pc.parameter_count == 2 def test_L1(self): # Lampinen ([5]) test problem 1 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:]) return fun A = np.zeros((10, 14)) # 1-indexed to match reference A[1, [1, 2, 10, 11]] = 2, 2, 1, 1 A[2, [1, 10]] = -8, 1 A[3, [4, 5, 10]] = -2, -1, 1 A[4, [1, 3, 10, 11]] = 2, 2, 1, 1 A[5, [2, 11]] = -8, 1 A[6, [6, 7, 11]] = -2, -1, 1 A[7, [2, 3, 11, 12]] = 2, 2, 1, 1 A[8, [3, 12]] = -8, 1 A[9, [8, 9, 12]] = -2, -1, 1 A = A[1:, 1:] b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0]) L = LinearConstraint(A, -np.inf, b) bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)] # using a lower popsize to speed the test up res = differential_evolution(f, bounds, strategy='best1bin', seed=1234, constraints=(L), popsize=2) x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1) f_opt = -15 assert_allclose(f(x_opt), f_opt) assert res.success assert_allclose(res.x, x_opt, atol=5e-4) assert_allclose(res.fun, f_opt, atol=5e-3) assert_(np.all(A@res.x <= b)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) # now repeat the same solve, using the same overall constraints, # but using a sparse matrix for the LinearConstraint instead of an # array L = LinearConstraint(csr_matrix(A), -np.inf, b) # using a lower popsize to speed the test up res = differential_evolution(f, bounds, strategy='best1bin', seed=1234, constraints=(L), popsize=2) assert_allclose(f(x_opt), f_opt) assert res.success assert_allclose(res.x, x_opt, atol=5e-4) assert_allclose(res.fun, f_opt, atol=5e-3) assert_(np.all(A@res.x <= b)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) # now repeat the same solve, using the same overall constraints, # but specify half the constraints in terms of LinearConstraint, # and the other half by NonlinearConstraint def c1(x): x = np.hstack(([0], x)) return [2*x[2] + 2*x[3] + x[11] + x[12], -8*x[3] + x[12]] def c2(x): x = np.hstack(([0], x)) return -2*x[8] - x[9] + x[12] L = LinearConstraint(A[:5, :], -np.inf, b[:5]) L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6]) N = NonlinearConstraint(c1, -np.inf, b[6:8]) N2 = NonlinearConstraint(c2, -np.inf, b[8:9]) constraints = (L, N, L2, N2) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, popsize=2) assert_allclose(res.x, x_opt, atol=5e-4) assert_allclose(res.fun, f_opt, atol=5e-3) assert_(np.all(A@res.x <= b)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L2(self): # Lampinen ([5]) test problem 2 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 + 10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7]) return fun def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5], 196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 
8*x[7], 282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5], -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 - 5*x[6] + 11*x[7]] N = NonlinearConstraint(c1, 0, np.inf) bounds = [(-10, 10)]*7 constraints = (N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) f_opt = 680.6300599487869 x_opt = (2.330499, 1.951372, -0.4775414, 4.365726, -0.6244870, 1.038131, 1.594227) assert_allclose(f(x_opt), f_opt) assert_allclose(res.fun, f_opt) assert_allclose(res.x, x_opt, atol=1e-5) assert res.success assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L3(self): # Lampinen ([5]) test problem 3 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] + (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 + 5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 + (x[10] - 7)**2 + 45 ) return fun # maximize A = np.zeros((4, 11)) A[1, [1, 2, 7, 8]] = -4, -5, 3, -9 A[2, [1, 2, 7, 8]] = -10, 8, 17, -2 A[3, [1, 2, 9, 10]] = 8, -2, -5, 2 A = A[1:, 1:] b = np.array([-105, 0, -12]) def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10], -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120, -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6], -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40, -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30] L = LinearConstraint(A, b, np.inf) N = NonlinearConstraint(c1, 0, np.inf) bounds = [(-10, 10)]*10 constraints = (L, N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, seed=1234, constraints=constraints, popsize=3) x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548, 1.430574, 1.321644, 9.828726, 8.280092, 8.375927) f_opt = 24.3062091 assert_allclose(f(x_opt), f_opt, atol=1e-5) assert_allclose(res.x, x_opt, atol=1e-6) assert_allclose(res.fun, f_opt, atol=1e-5) assert res.success assert_(np.all(A @ res.x >= b)) assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L4(self): # Lampinen ([5]) test problem 4 def f(x): return np.sum(x[:3]) A = np.zeros((4, 9)) A[1, [4, 6]] = 0.0025, 0.0025 A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025 A[3, [8, 5]] = 0.01, -0.01 A = A[1:, 1:] b = np.array([1, 1, 1]) def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333, x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4], x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]] L = LinearConstraint(A, -np.inf, 1) N = NonlinearConstraint(c1, 0, np.inf) bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5 constraints = (L, N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, popsize=3) f_opt = 7049.248 x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172, 217.9823, 286.416528, 395.601172] assert_allclose(f(x_opt), f_opt, atol=0.001) assert_allclose(res.fun, f_opt, atol=0.001) # use higher tol here for 32-bit Windows, see gh-11693 if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8): assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035) else: # tolerance determined from macOS + MKL failure, see gh-12701 assert_allclose(res.x, x_opt, 
rtol=5e-6, atol=0.0024) assert res.success assert_(np.all(A @ res.x <= b)) assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L5(self): # Lampinen ([5]) test problem 5 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) / (x[1]**3*(x[1]+x[2]))) return -fun # maximize def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [x[1]**2 - x[2] + 1, 1 - x[1] + (x[2]-4)**2] N = NonlinearConstraint(c1, -np.inf, 0) bounds = [(0, 10)]*2 constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) x_opt = (1.22797135, 4.24537337) f_opt = -0.095825 assert_allclose(f(x_opt), f_opt, atol=2e-5) assert_allclose(res.fun, f_opt, atol=1e-4) assert res.success assert_(np.all(np.array(c1(res.x)) <= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L6(self): # Lampinen ([5]) test problem 6 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (x[1]-10)**3 + (x[2] - 20)**3 return fun def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [(x[1]-5)**2 + (x[2] - 5)**2 - 100, -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81] N = NonlinearConstraint(c1, 0, np.inf) bounds = [(13, 100), (0, 100)] constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, tol=1e-7) x_opt = (14.095, 0.84296) f_opt = -6961.814744 assert_allclose(f(x_opt), f_opt, atol=1e-6) assert_allclose(res.fun, f_opt, atol=0.001) assert_allclose(res.x, x_opt, atol=1e-4) assert res.success assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L7(self): # Lampinen ([5]) test problem 7 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] + 37.293239*x[1] - 40792.141) return fun def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [ 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] - 0.0022053*x[3]*x[5], 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] + 0.0021813*x[3]**2, 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] + 0.0019085*x[3]*x[4] ] N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25]) bounds = [(78, 102), (33, 45)] + [(27, 45)]*3 constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) # using our best solution, rather than Lampinen/Koziel. Koziel solution # doesn't satisfy constraints, Lampinen f_opt just plain wrong. 
x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971, 36.77579979] f_opt = -30665.537578 assert_allclose(f(x_opt), f_opt) assert_allclose(res.x, x_opt, atol=1e-3) assert_allclose(res.fun, f_opt, atol=1e-3) assert res.success assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20]))) assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25]))) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) @pytest.mark.slow @pytest.mark.xfail(platform.machine() == 'ppc64le', reason="fails on ppc64le") def test_L8(self): def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3 return fun A = np.zeros((3, 5)) A[1, [4, 3]] = 1, -1 A[2, [3, 4]] = 1, -1 A = A[1:, 1:] b = np.array([-.55, -.55]) def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [ 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) + 894.8 - x[1], 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) + 894.8 - x[2], 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) + 1294.8 ] L = LinearConstraint(A, b, np.inf) N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001)) bounds = [(0, 1200)]*2+[(-.55, .55)]*2 constraints = (L, N) with suppress_warnings() as sup: sup.filter(UserWarning) # original Lampinen test was with rand1bin, but that takes a # huge amount of CPU time. Changing strategy to best1bin speeds # things up a lot res = differential_evolution(f, bounds, strategy='best1bin', seed=1234, constraints=constraints, maxiter=5000) x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336) f_opt = 5126.4981 assert_allclose(f(x_opt), f_opt, atol=1e-3) assert_allclose(res.x[:2], x_opt[:2], atol=2e-3) assert_allclose(res.x[2:], x_opt[2:], atol=2e-3) assert_allclose(res.fun, f_opt, atol=2e-2) assert res.success assert_(np.all(A@res.x >= b)) assert_(np.all(np.array(c1(res.x)) >= -0.001)) assert_(np.all(np.array(c1(res.x)) <= 0.001)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L9(self): # Lampinen ([5]) test problem 9 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference return x[1]**2 + (x[2]-1)**2 def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [x[2] - x[1]**2] N = NonlinearConstraint(c1, [-.001], [0.001]) bounds = [(-1, 1)]*2 constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) x_opt = [np.sqrt(2)/2, 0.5] f_opt = 0.75 assert_allclose(f(x_opt), f_opt) assert_allclose(np.abs(res.x), x_opt, atol=1e-3) assert_allclose(res.fun, f_opt, atol=1e-3) assert res.success assert_(np.all(np.array(c1(res.x)) >= -0.001)) assert_(np.all(np.array(c1(res.x)) <= 0.001)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_integrality(self): # test fitting discrete distribution to data rng = np.random.default_rng(6519843218105) dist = stats.nbinom shapes = (5, 0.5) x = dist.rvs(*shapes, size=10000, random_state=rng) def func(p, *args): dist, x = args # negative log-likelihood function ll = -np.log(dist.pmf(x, *p)).sum(axis=-1) if np.isnan(ll): # occurs when x is outside of support ll = np.inf # we don't want that return ll integrality = [True, False] bounds = [(1, 18), (0, 0.95)] res = differential_evolution(func, bounds, args=(dist, x), integrality=integrality, polish=False, seed=rng) # tolerance has to be fairly relaxed for the second parameter # because we're fitting a distribution to random variates. 
assert res.x[0] == 5 assert_allclose(res.x, shapes, rtol=0.025) # check that we can still use integrality constraints with polishing res2 = differential_evolution(func, bounds, args=(dist, x), integrality=integrality, polish=True, seed=rng) def func2(p, *args): n, dist, x = args return func(np.array([n, p[0]]), dist, x) # compare the DE derived solution to an LBFGSB solution (that doesn't # have to find the integral values). Note we're setting x0 to be the # output from the first DE result, thereby making the polishing step # and this minimisation pretty much equivalent. LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x), bounds=[(0, 0.95)]) assert_allclose(res2.x[1], LBFGSB.x) assert res2.fun <= res.fun def test_integrality_limits(self): def f(x): return x integrality = [True, False, True] bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)] # no integrality constraints solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, integrality=False) assert_allclose(solver.limits[0], [0.2, 0.9, 3.3]) assert_allclose(solver.limits[1], [1.1, 2.2, 4.9]) # with integrality constraints solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, integrality=integrality) assert_allclose(solver.limits[0], [0.5, 0.9, 3.5]) assert_allclose(solver.limits[1], [1.5, 2.2, 4.5]) assert_equal(solver.integrality, [True, False, True]) assert solver.polish is False bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)] solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, integrality=integrality) assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5]) assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5]) # A lower bound of -1.2 is converted to # np.nextafter(np.ceil(-1.2) - 0.5, np.inf) # with a similar process to the upper bound. Check that the # conversions work assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0]) assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0]) bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)] solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False, integrality=integrality) assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5]) assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5]) bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)] with pytest.raises(ValueError, match='One of the integrality'): DifferentialEvolutionSolver(f, bounds=bounds, polish=False, integrality=integrality) def test_vectorized(self): def quadratic(x): return np.sum(x**2) def quadratic_vec(x): return np.sum(x**2, axis=0) # A vectorized function needs to accept (len(x), S) and return (S,) with pytest.raises(RuntimeError, match='The vectorized function'): differential_evolution(quadratic, self.bounds, vectorized=True, updating='deferred') # vectorized overrides the updating keyword, check for warning with warns(UserWarning, match="differential_evolution: the 'vector"): differential_evolution(quadratic_vec, self.bounds, vectorized=True) # vectorized defers to the workers keyword, check for warning with warns(UserWarning, match="differential_evolution: the 'workers"): differential_evolution(quadratic_vec, self.bounds, vectorized=True, workers=map, updating='deferred') ncalls = [0] def rosen_vec(x): ncalls[0] += 1 return rosen(x) bounds = [(0, 10), (0, 10)] res1 = differential_evolution(rosen, bounds, updating='deferred', seed=1) res2 = differential_evolution(rosen_vec, bounds, vectorized=True, updating='deferred', seed=1) # the two minimisation runs should be functionally equivalent assert_allclose(res1.x, res2.x) assert ncalls[0] == res2.nfev assert res1.nit == 
res2.nit def test_vectorized_constraints(self): def constr_f(x): return np.array([x[0] + x[1]]) def constr_f2(x): return np.array([x[0]**2 + x[1], x[0] - x[1]]) nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9) nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0)) def rosen_vec(x): # accept an (len(x0), S) array, returning a (S,) array v = 100 * (x[1:] - x[:-1]**2.0)**2.0 v += (1 - x[:-1])**2.0 return np.squeeze(v) bounds = [(0, 10), (0, 10)] res1 = differential_evolution(rosen, bounds, updating='deferred', seed=1, constraints=[nlc1, nlc2], polish=False) res2 = differential_evolution(rosen_vec, bounds, vectorized=True, updating='deferred', seed=1, constraints=[nlc1, nlc2], polish=False) # the two minimisation runs should be functionally equivalent assert_allclose(res1.x, res2.x) def test_constraint_violation_error_message(self): def func(x): return np.cos(x[0]) + np.sin(x[1]) # Intentionally infeasible constraints. c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf) c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0) result = differential_evolution(func, bounds=[(-1, 2), (-1, 1)], constraints=[c0, c1], maxiter=10, polish=False, seed=864197532) assert result.success is False # The numerical value in the error message might be sensitive to # changes in the implementation. It can be updated if the code is # changed. The essential part of the test is that there is a number # after the '=', so if necessary, the text could be reduced to, say, # "MAXCV = 0.". assert "MAXCV = 0.414" in result.message
61,777
40.323077
96
py
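The vectorized tests above can be reproduced outside the test harness. The sketch below is an assumption-laden illustration (the objective and bounds are my own choices, not from the test file, and the vectorized keyword needs a recent SciPy): the callable accepts a (len(bounds), S) population array and returns an (S,) array of energies.

import numpy as np
from scipy.optimize import differential_evolution

def sphere_vec(x):
    # x arrives with shape (len(bounds), S); return one energy per column
    return np.sum(x**2, axis=0)

bounds = [(-5, 5), (-5, 5)]
res = differential_evolution(sphere_vec, bounds, vectorized=True,
                             updating='deferred', polish=False, seed=1)
print(res.x, res.fun)

As the warning tests above check, passing vectorized=True without updating='deferred' is not an error; the updating keyword is overridden and a UserWarning is emitted.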
scipy
scipy-main/scipy/optimize/tests/test__linprog_clean_inputs.py
""" Unit test for Linear Programming via Simplex Algorithm. """ import numpy as np from numpy.testing import assert_, assert_allclose, assert_equal from pytest import raises as assert_raises from scipy.optimize._linprog_util import _clean_inputs, _LPProblem from copy import deepcopy from datetime import date def test_aliasing(): """ Test for ensuring that no objects referred to by `lp` attributes, `c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`, have been modified by `_clean_inputs` as a side effect. """ lp = _LPProblem( c=1, A_ub=[[1]], b_ub=[1], A_eq=[[1]], b_eq=[1], bounds=(-np.inf, np.inf) ) lp_copy = deepcopy(lp) _clean_inputs(lp) assert_(lp.c == lp_copy.c, "c modified by _clean_inputs") assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs") assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs") assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs") assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs") assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") def test_aliasing2(): """ Similar purpose as `test_aliasing` above. """ lp = _LPProblem( c=np.array([1, 1]), A_ub=np.array([[1, 1], [2, 2]]), b_ub=np.array([[1], [1]]), A_eq=np.array([[1, 1]]), b_eq=np.array([1]), bounds=[(-np.inf, np.inf), (None, 1)] ) lp_copy = deepcopy(lp) _clean_inputs(lp) assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs") assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs") assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs") assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs") assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs") assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") def test_missing_inputs(): c = [1, 2] A_ub = np.array([[1, 1], [2, 2]]) b_ub = np.array([1, 1]) A_eq = np.array([[1, 1], [2, 2]]) b_eq = np.array([1, 1]) assert_raises(TypeError, _clean_inputs) assert_raises(TypeError, _clean_inputs, _LPProblem(c=None)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq)) def test_too_many_dimensions(): cb = [1, 2, 3, 4] A = np.random.rand(4, 4) bad2D = [[1, 2], [3, 4]] bad3D = np.random.rand(4, 4, 4) assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D)) def test_too_few_dimensions(): bad = np.random.rand(4, 4).ravel() cb = np.random.rand(4) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb)) def test_inconsistent_dimensions(): m = 2 n = 4 c = [1, 2, 3, 4] Agood = np.random.rand(m, n) Abad = np.random.rand(m, n + 1) 
bgood = np.random.rand(m) bbad = np.random.rand(m + 1) boundsbad = [(0, 1)] * (n + 1) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad)) with np.testing.suppress_warnings() as sup: sup.filter(np.VisibleDeprecationWarning, "Creating an ndarray from ragged") assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]])) def test_type_errors(): lp = _LPProblem( c=[1, 2], A_ub=np.array([[1, 1], [2, 2]]), b_ub=np.array([1, 1]), A_eq=np.array([[1, 1], [2, 2]]), b_eq=np.array([1, 1]), bounds=[(0, 1)] ) bad = "hello" assert_raises(TypeError, _clean_inputs, lp._replace(c=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad)) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bad)) assert_raises(ValueError, _clean_inputs, lp._replace(bounds="hi")) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=["hi"])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[("hi")])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, "")])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")])) assert_raises(TypeError, _clean_inputs, lp._replace(bounds=[(1, date(2020, 2, 29))])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[[[1, 2]]])) def test_non_finite_errors(): lp = _LPProblem( c=[1, 2], A_ub=np.array([[1, 1], [2, 2]]), b_ub=np.array([1, 1]), A_eq=np.array([[1, 1], [2, 2]]), b_eq=np.array([1, 1]), bounds=[(0, 1)] ) assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None])) assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0])) assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf])) assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0])) assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]])) assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1])) assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]])) assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan])) def test__clean_inputs1(): lp = _LPProblem( c=[1, 2], A_ub=[[1, 1], [2, 2]], b_ub=[1, 1], A_eq=[[1, 1], [2, 2]], b_eq=[1, 1], bounds=None ) lp_cleaned = _clean_inputs(lp) assert_allclose(lp_cleaned.c, np.array(lp.c)) assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) assert_(lp_cleaned.c.shape == (2,), "") assert_(lp_cleaned.A_ub.shape == (2, 2), "") assert_(lp_cleaned.b_ub.shape == (2,), "") assert_(lp_cleaned.A_eq.shape == (2, 2), "") assert_(lp_cleaned.b_eq.shape == (2,), "") def test__clean_inputs2(): lp = _LPProblem( c=1, A_ub=[[1]], b_ub=1, A_eq=[[1]], b_eq=1, bounds=(0, 1) ) lp_cleaned = _clean_inputs(lp) assert_allclose(lp_cleaned.c, np.array(lp.c)) assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) 
assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) assert_equal(lp_cleaned.bounds, [(0, 1)]) assert_(lp_cleaned.c.shape == (1,), "") assert_(lp_cleaned.A_ub.shape == (1, 1), "") assert_(lp_cleaned.b_ub.shape == (1,), "") assert_(lp_cleaned.A_eq.shape == (1, 1), "") assert_(lp_cleaned.b_eq.shape == (1,), "") def test__clean_inputs3(): lp = _LPProblem( c=[[1, 2]], A_ub=np.random.rand(2, 2), b_ub=[[1], [2]], A_eq=np.random.rand(2, 2), b_eq=[[1], [2]], bounds=[(0, 1)] ) lp_cleaned = _clean_inputs(lp) assert_allclose(lp_cleaned.c, np.array([1, 2])) assert_allclose(lp_cleaned.b_ub, np.array([1, 2])) assert_allclose(lp_cleaned.b_eq, np.array([1, 2])) assert_equal(lp_cleaned.bounds, [(0, 1)] * 2) assert_(lp_cleaned.c.shape == (2,), "") assert_(lp_cleaned.b_ub.shape == (2,), "") assert_(lp_cleaned.b_eq.shape == (2,), "") def test_bad_bounds(): lp = _LPProblem(c=[1, 2]) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2))) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)])) with np.testing.suppress_warnings() as sup: sup.filter(np.VisibleDeprecationWarning, "Creating an ndarray from ragged") assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2, 2)])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2), (1, 2)])) lp = _LPProblem(c=[1, 2, 3, 4]) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)])) def test_good_bounds(): lp = _LPProblem(c=[1, 2]) lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=[])) assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=[[]])) assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) assert_equal(lp_cleaned.bounds, [(1, 2)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 2) lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)])) assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 2) lp = _LPProblem(c=[1, 2, 3, 4]) lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default assert_equal(lp_cleaned.bounds, [(0, np.inf)] * 4) lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) assert_equal(lp_cleaned.bounds, [(1, 2)] * 4) lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, None)])) assert_equal(lp_cleaned.bounds, [(1, np.inf)] * 4) lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)])) assert_equal(lp_cleaned.bounds, [(-np.inf, 1)] * 4) lp_cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None), (None, np.inf), (-np.inf, np.inf)])) assert_equal(lp_cleaned.bounds, [(-np.inf, np.inf)] * 4)
11,422
36.575658
118
py
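For readers tracing these tests, a minimal sketch of the bounds-broadcasting behaviour they assert, using the same private helpers (`_LPProblem`, `_clean_inputs`); this is internal API, so it may change between SciPy versions.

from scipy.optimize._linprog_util import _clean_inputs, _LPProblem

lp = _LPProblem(c=[1, 2, 3])
cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
# the single (lb, ub) pair is broadcast to all three variables,
# with None mapped to +inf, matching test_good_bounds above
print(cleaned.bounds)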
scipy
scipy-main/scipy/optimize/tests/test_quadratic_assignment.py
import pytest import numpy as np from scipy.optimize import quadratic_assignment, OptimizeWarning from scipy.optimize._qap import _calc_score as _score from numpy.testing import assert_equal, assert_, assert_warns ################ # Common Tests # ################ def chr12c(): A = [ [0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0], [90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0], [10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0], [0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0], [0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0], [0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0], [0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0], [0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37], [0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0], ] B = [ [0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95], [36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36], [54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63], [26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85], [59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76], [72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34], [9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37], [34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80], [79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33], [17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86], [46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18], [95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0], ] A, B = np.array(A), np.array(B) n = A.shape[0] opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n return A, B, opt_perm class QAPCommonTests: """ Base class for `quadratic_assignment` tests. """ def setup_method(self): np.random.seed(0) # Test global optima of problem from Umeyama IVB # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf # Graph matching maximum is in the paper # QAP minimum determined by brute force def test_accuracy_1(self): # besides testing accuracy, check that A and B can be lists A = [[0, 3, 4, 2], [0, 0, 1, 2], [1, 0, 0, 1], [0, 0, 1, 0]] B = [[0, 4, 2, 4], [0, 0, 1, 0], [0, 2, 0, 2], [0, 1, 2, 0]] res = quadratic_assignment(A, B, method=self.method, options={"rng": 0, "maximize": False}) assert_equal(res.fun, 10) assert_equal(res.col_ind, np.array([1, 2, 3, 0])) res = quadratic_assignment(A, B, method=self.method, options={"rng": 0, "maximize": True}) if self.method == 'faq': # Global optimum is 40, but FAQ gets 37 assert_equal(res.fun, 37) assert_equal(res.col_ind, np.array([0, 2, 3, 1])) else: assert_equal(res.fun, 40) assert_equal(res.col_ind, np.array([0, 3, 1, 2])) res = quadratic_assignment(A, B, method=self.method, options={"rng": 0, "maximize": True}) # Test global optima of problem from Umeyama IIIB # https://pcl.sitehost.iu.edu/rgoldsto/papers/weighted%20graph%20match2.pdf # Graph matching maximum is in the paper # QAP minimum determined by brute force def test_accuracy_2(self): A = np.array([[0, 5, 8, 6], [5, 0, 5, 1], [8, 5, 0, 2], [6, 1, 2, 0]]) B = np.array([[0, 1, 8, 4], [1, 0, 5, 2], [8, 5, 0, 5], [4, 2, 5, 0]]) res = quadratic_assignment(A, B, method=self.method, options={"rng": 0, "maximize": False}) if self.method == 'faq': # Global optimum is 176, but FAQ gets 178 assert_equal(res.fun, 178) assert_equal(res.col_ind, np.array([1, 0, 3, 2])) else: assert_equal(res.fun, 176) assert_equal(res.col_ind, np.array([1, 2, 3, 0])) res = quadratic_assignment(A, B, method=self.method, options={"rng": 0, "maximize": True}) assert_equal(res.fun, 286) assert_equal(res.col_ind, np.array([2, 3, 0, 1])) def test_accuracy_3(self): A, B, opt_perm = chr12c() # basic minimization res = 
quadratic_assignment(A, B, method=self.method, options={"rng": 0}) assert_(11156 <= res.fun < 21000) assert_equal(res.fun, _score(A, B, res.col_ind)) # basic maximization res = quadratic_assignment(A, B, method=self.method, options={"rng": 0, 'maximize': True}) assert_(74000 <= res.fun < 85000) assert_equal(res.fun, _score(A, B, res.col_ind)) # check ofv with strictly partial match seed_cost = np.array([4, 8, 10]) seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T res = quadratic_assignment(A, B, method=self.method, options={'partial_match': seed}) assert_(11156 <= res.fun < 21000) assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost]) # check performance when partial match is the global optimum seed = np.asarray([np.arange(len(A)), opt_perm]).T res = quadratic_assignment(A, B, method=self.method, options={'partial_match': seed}) assert_equal(res.col_ind, seed[:, 1].T) assert_equal(res.fun, 11156) assert_equal(res.nit, 0) # check performance with zero sized matrix inputs empty = np.empty((0, 0)) res = quadratic_assignment(empty, empty, method=self.method, options={"rng": 0}) assert_equal(res.nit, 0) assert_equal(res.fun, 0) def test_unknown_options(self): A, B, opt_perm = chr12c() def f(): quadratic_assignment(A, B, method=self.method, options={"ekki-ekki": True}) assert_warns(OptimizeWarning, f) class TestFAQ(QAPCommonTests): method = "faq" def test_options(self): # cost and distance matrices of QAPLIB instance chr12c A, B, opt_perm = chr12c() n = len(A) # check that max_iter is obeying with low input value res = quadratic_assignment(A, B, options={'maxiter': 5}) assert_equal(res.nit, 5) # test with shuffle res = quadratic_assignment(A, B, options={'shuffle_input': True}) assert_(11156 <= res.fun < 21000) # test with randomized init res = quadratic_assignment(A, B, options={'rng': 1, 'P0': "randomized"}) assert_(11156 <= res.fun < 21000) # check with specified P0 K = np.ones((n, n)) / float(n) K = _doubly_stochastic(K) res = quadratic_assignment(A, B, options={'P0': K}) assert_(11156 <= res.fun < 21000) def test_specific_input_validation(self): A = np.identity(2) B = A # method is implicitly faq # ValueError Checks: making sure single value parameters are of # correct value with pytest.raises(ValueError, match="Invalid 'P0' parameter"): quadratic_assignment(A, B, options={'P0': "random"}) with pytest.raises( ValueError, match="'maxiter' must be a positive integer"): quadratic_assignment(A, B, options={'maxiter': -1}) with pytest.raises(ValueError, match="'tol' must be a positive float"): quadratic_assignment(A, B, options={'tol': -1}) # TypeError Checks: making sure single value parameters are of # correct type with pytest.raises(TypeError): quadratic_assignment(A, B, options={'maxiter': 1.5}) # test P0 matrix input with pytest.raises( ValueError, match="`P0` matrix must have shape m' x m', where m'=n-m"): quadratic_assignment( np.identity(4), np.identity(4), options={'P0': np.ones((3, 3))} ) K = [[0.4, 0.2, 0.3], [0.3, 0.6, 0.2], [0.2, 0.2, 0.7]] # matrix that isn't quite doubly stochastic with pytest.raises( ValueError, match="`P0` matrix must be doubly stochastic"): quadratic_assignment( np.identity(3), np.identity(3), options={'P0': K} ) class Test2opt(QAPCommonTests): method = "2opt" def test_deterministic(self): # np.random.seed(0) executes before every method n = 20 A = np.random.rand(n, n) B = np.random.rand(n, n) res1 = quadratic_assignment(A, B, method=self.method) np.random.seed(0) A = np.random.rand(n, n) B = np.random.rand(n, n) res2 = quadratic_assignment(A, B, 
method=self.method) assert_equal(res1.nit, res2.nit) def test_partial_guess(self): n = 5 A = np.random.rand(n, n) B = np.random.rand(n, n) res1 = quadratic_assignment(A, B, method=self.method, options={'rng': 0}) guess = np.array([np.arange(5), res1.col_ind]).T res2 = quadratic_assignment(A, B, method=self.method, options={'rng': 0, 'partial_guess': guess}) fix = [2, 4] match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T res3 = quadratic_assignment(A, B, method=self.method, options={'rng': 0, 'partial_guess': guess, 'partial_match': match}) assert_(res1.nit != n*(n+1)/2) assert_equal(res2.nit, n*(n+1)/2) # tests each swap exactly once assert_equal(res3.nit, (n-2)*(n-1)/2) # tests free swaps exactly once def test_specific_input_validation(self): # can't have more seed nodes than cost/dist nodes _rm = _range_matrix with pytest.raises( ValueError, match="`partial_guess` can have only as many entries as"): quadratic_assignment(np.identity(3), np.identity(3), method=self.method, options={'partial_guess': _rm(5, 2)}) # test for only two seed columns with pytest.raises( ValueError, match="`partial_guess` must have two columns"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': _range_matrix(2, 3)} ) # test that seed has no more than two dimensions with pytest.raises( ValueError, match="`partial_guess` must have exactly two"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': np.random.rand(3, 2, 2)} ) # seeds cannot be negative valued with pytest.raises( ValueError, match="`partial_guess` must contain only pos"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': -1 * _range_matrix(2, 2)} ) # seeds can't have values greater than number of nodes with pytest.raises( ValueError, match="`partial_guess` entries must be less than number"): quadratic_assignment( np.identity(5), np.identity(5), method=self.method, options={'partial_guess': 2 * _range_matrix(4, 2)} ) # columns of seed matrix must be unique with pytest.raises( ValueError, match="`partial_guess` column entries must be unique"): quadratic_assignment( np.identity(3), np.identity(3), method=self.method, options={'partial_guess': np.ones((2, 2))} ) class TestQAPOnce(): def setup_method(self): np.random.seed(0) # these don't need to be repeated for each method def test_common_input_validation(self): # test that non square matrices return error with pytest.raises(ValueError, match="`A` must be square"): quadratic_assignment( np.random.random((3, 4)), np.random.random((3, 3)), ) with pytest.raises(ValueError, match="`B` must be square"): quadratic_assignment( np.random.random((3, 3)), np.random.random((3, 4)), ) # test that cost and dist matrices have no more than two dimensions with pytest.raises( ValueError, match="`A` and `B` must have exactly two"): quadratic_assignment( np.random.random((3, 3, 3)), np.random.random((3, 3, 3)), ) # test that cost and dist matrices of different sizes return error with pytest.raises( ValueError, match="`A` and `B` matrices must be of equal size"): quadratic_assignment( np.random.random((3, 3)), np.random.random((4, 4)), ) # can't have more seed nodes than cost/dist nodes _rm = _range_matrix with pytest.raises( ValueError, match="`partial_match` can have only as many seeds as"): quadratic_assignment(np.identity(3), np.identity(3), options={'partial_match': _rm(5, 2)}) # test for only two seed columns with pytest.raises( ValueError, match="`partial_match` must 
have two columns"): quadratic_assignment( np.identity(3), np.identity(3), options={'partial_match': _range_matrix(2, 3)} ) # test that seed has no more than two dimensions with pytest.raises( ValueError, match="`partial_match` must have exactly two"): quadratic_assignment( np.identity(3), np.identity(3), options={'partial_match': np.random.rand(3, 2, 2)} ) # seeds cannot be negative valued with pytest.raises( ValueError, match="`partial_match` must contain only pos"): quadratic_assignment( np.identity(3), np.identity(3), options={'partial_match': -1 * _range_matrix(2, 2)} ) # seeds can't have values greater than number of nodes with pytest.raises( ValueError, match="`partial_match` entries must be less than number"): quadratic_assignment( np.identity(5), np.identity(5), options={'partial_match': 2 * _range_matrix(4, 2)} ) # columns of seed matrix must be unique with pytest.raises( ValueError, match="`partial_match` column entries must be unique"): quadratic_assignment( np.identity(3), np.identity(3), options={'partial_match': np.ones((2, 2))} ) def _range_matrix(a, b): mat = np.zeros((a, b)) for i in range(b): mat[:, i] = np.arange(a) return mat def _doubly_stochastic(P, tol=1e-3): # cleaner implementation of btaba/sinkhorn_knopp max_iter = 1000 c = 1 / P.sum(axis=0) r = 1 / (P @ c) P_eps = P for it in range(max_iter): if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and (np.abs(P_eps.sum(axis=0) - 1) < tol).all()): # All column/row sums ~= 1 within threshold break c = 1 / (r @ P) r = 1 / (P @ c) P_eps = r[:, None] * P * c return P_eps
16,309
36.75463
79
py
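A minimal sketch of the public call pattern these tests exercise, reusing the small matrices from test_accuracy_1; the partial_match seed shown here is an illustrative choice, not taken from the tests.

import numpy as np
from scipy.optimize import quadratic_assignment

A = np.array([[0, 3, 4, 2], [0, 0, 1, 2], [1, 0, 0, 1], [0, 0, 1, 0]])
B = np.array([[0, 4, 2, 4], [0, 0, 1, 0], [0, 2, 0, 2], [0, 1, 2, 0]])

# default method is 'faq'; partial_match pins node 0 of A to node 1 of B
res = quadratic_assignment(A, B,
                           options={'rng': 0,
                                    'partial_match': np.array([[0, 1]])})
print(res.col_ind, res.fun)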
scipy
scipy-main/scipy/optimize/cython_optimize/setup.py
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('cython_optimize', parent_package, top_path)
    config.add_data_files('*.pxd')
    config.add_extension('_zeros', sources='_zeros.c')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
411
30.692308
71
py
scipy
scipy-main/scipy/optimize/cython_optimize/__init__.py
""" Cython optimize root finding API ================================ The underlying C functions for the following root finders can be accessed directly using Cython: - `~scipy.optimize.bisect` - `~scipy.optimize.ridder` - `~scipy.optimize.brenth` - `~scipy.optimize.brentq` The Cython API for the root finding functions is similar except there is no ``disp`` argument. Import the root finding functions using ``cimport`` from `scipy.optimize.cython_optimize`. :: from scipy.optimize.cython_optimize cimport bisect, ridder, brentq, brenth Callback signature ------------------ The zeros functions in `~scipy.optimize.cython_optimize` expect a callback that takes a double for the scalar independent variable as the 1st argument and a user defined ``struct`` with any extra parameters as the 2nd argument. :: double (*callback_type)(double, void*) Examples -------- Usage of `~scipy.optimize.cython_optimize` requires Cython to write callbacks that are compiled into C. For more information on compiling Cython, see the `Cython Documentation <http://docs.cython.org/en/latest/index.html>`_. These are the basic steps: 1. Create a Cython ``.pyx`` file, for example: ``myexample.pyx``. 2. Import the desired root finder from `~scipy.optimize.cython_optimize`. 3. Write the callback function, and call the selected root finding function passing the callback, any extra arguments, and the other solver parameters. :: from scipy.optimize.cython_optimize cimport brentq # import math from Cython from libc cimport math myargs = {'C0': 1.0, 'C1': 0.7} # a dictionary of extra arguments XLO, XHI = 0.5, 1.0 # lower and upper search boundaries XTOL, RTOL, MITR = 1e-3, 1e-3, 10 # other solver parameters # user-defined struct for extra parameters ctypedef struct test_params: double C0 double C1 # user-defined callback cdef double f(double x, void *args): cdef test_params *myargs = <test_params *> args return myargs.C0 - math.exp(-(x - myargs.C1)) # Cython wrapper function cdef double brentq_wrapper_example(dict args, double xa, double xb, double xtol, double rtol, int mitr): # Cython automatically casts dictionary to struct cdef test_params myargs = args return brentq( f, xa, xb, <test_params *> &myargs, xtol, rtol, mitr, NULL) # Python function def brentq_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL, mitr=MITR): '''Calls Cython wrapper from Python.''' return brentq_wrapper_example(args, xa, xb, xtol, rtol, mitr) 4. If you want to call your function from Python, create a Cython wrapper, and a Python function that calls the wrapper, or use ``cpdef``. Then, in Python, you can import and run the example. :: from myexample import brentq_example x = brentq_example() # 0.6999942848231314 5. Create a Cython ``.pxd`` file if you need to export any Cython functions. Full output ----------- The functions in `~scipy.optimize.cython_optimize` can also copy the full output from the solver to a C ``struct`` that is passed as its last argument. If you don't want the full output, just pass ``NULL``. The full output ``struct`` must be type ``zeros_full_output``, which is defined in `scipy.optimize.cython_optimize` with the following fields: - ``int funcalls``: number of function calls - ``int iterations``: number of iterations - ``int error_num``: error number - ``double root``: root of function The root is copied by `~scipy.optimize.cython_optimize` to the full output ``struct``. An error number of -1 means a sign error, -2 means a convergence error, and 0 means the solver converged. 
Continuing from the previous example:: from scipy.optimize.cython_optimize cimport zeros_full_output # cython brentq solver with full output cdef zeros_full_output brentq_full_output_wrapper_example( dict args, double xa, double xb, double xtol, double rtol, int mitr): cdef test_params myargs = args cdef zeros_full_output my_full_output # use my_full_output instead of NULL brentq(f, xa, xb, &myargs, xtol, rtol, mitr, &my_full_output) return my_full_output # Python function def brent_full_output_example(args=myargs, xa=XLO, xb=XHI, xtol=XTOL, rtol=RTOL, mitr=MITR): '''Returns full output''' return brentq_full_output_wrapper_example(args, xa, xb, xtol, rtol, mitr) result = brent_full_output_example() # {'error_num': 0, # 'funcalls': 6, # 'iterations': 5, # 'root': 0.6999942848231314} """
4,869
35.343284
79
py
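As a point of comparison (my addition, not part of this module's docstring), the same root can be obtained from pure Python with scipy.optimize.brentq, trading the compiled C callback for convenience:

from math import exp
from scipy.optimize import brentq

def f(x, C0, C1):
    # same callback as the Cython example: C0 - exp(-(x - C1))
    return C0 - exp(-(x - C1))

root = brentq(f, 0.5, 1.0, args=(1.0, 0.7))
print(root)

With default tolerances the root comes out essentially at 0.7; the looser xtol/rtol used in the Cython example explain its 0.69999... value.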
scipy
scipy-main/scipy/optimize/_trlib/setup.py
def configuration(parent_package='', top_path=None):
    from numpy import get_include
    from numpy.distutils.system_info import get_info
    from scipy._build_utils import uses_blas64
    from numpy.distutils.misc_util import Configuration
    from os.path import join, dirname

    if uses_blas64():
        lapack_opt = get_info('lapack_ilp64_opt')
    else:
        lapack_opt = get_info('lapack_opt')

    lib_inc = join(dirname(dirname(dirname(__file__))), '_lib')
    bld_inc = join(dirname(dirname(dirname(__file__))), '_build_utils', 'src')

    config = Configuration('_trlib', parent_package, top_path)
    config.add_extension('_trlib',
                         sources=['_trlib.c', 'trlib_krylov.c',
                                  'trlib_eigen_inverse.c', 'trlib_leftmost.c',
                                  'trlib_quadratic_zero.c', 'trlib_tri_factor.c'],
                         include_dirs=[get_include(), lib_inc, bld_inc, 'trlib'],
                         extra_info=lapack_opt,
                         )
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
1,174
36.903226
82
py
scipy
scipy-main/scipy/optimize/_trlib/__init__.py
from ._trlib import TRLIBQuadraticSubproblem

__all__ = ['TRLIBQuadraticSubproblem', 'get_trlib_quadratic_subproblem']


def get_trlib_quadratic_subproblem(tol_rel_i=-2.0, tol_rel_b=-3.0, disp=False):
    def subproblem_factory(x, fun, jac, hess, hessp):
        return TRLIBQuadraticSubproblem(x, fun, jac, hess, hessp,
                                        tol_rel_i=tol_rel_i,
                                        tol_rel_b=tol_rel_b,
                                        disp=disp)
    return subproblem_factory
524
39.384615
79
py
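This factory is consumed by minimize(method='trust-krylov'), which builds its trust-region subproblems with TRLIB. A minimal sketch of that public entry point (the Rosenbrock test problem is an illustrative choice):

import numpy as np
from scipy.optimize import minimize, rosen, rosen_der, rosen_hess

x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
# 'trust-krylov' requires a gradient and a Hessian (or Hessian-vector product)
res = minimize(rosen, x0, method='trust-krylov', jac=rosen_der, hess=rosen_hess)
print(res.x)  # should approach the Rosenbrock minimum at np.ones(5)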
scipy
scipy-main/scipy/optimize/_lsq/dogbox.py
""" Dogleg algorithm with rectangular trust regions for least-squares minimization. The description of the algorithm can be found in [Voglis]_. The algorithm does trust-region iterations, but the shape of trust regions is rectangular as opposed to conventional elliptical. The intersection of a trust region and an initial feasible region is again some rectangle. Thus, on each iteration a bound-constrained quadratic optimization problem is solved. A quadratic problem is solved by well-known dogleg approach, where the function is minimized along piecewise-linear "dogleg" path [NumOpt]_, Chapter 4. If Jacobian is not rank-deficient then the function is decreasing along this path, and optimization amounts to simply following along this path as long as a point stays within the bounds. A constrained Cauchy step (along the anti-gradient) is considered for safety in rank deficient cases, in this situations the convergence might be slow. If during iterations some variable hit the initial bound and the component of anti-gradient points outside the feasible region, then a next dogleg step won't make any progress. At this state such variables satisfy first-order optimality conditions and they are excluded before computing a next dogleg step. Gauss-Newton step can be computed exactly by `numpy.linalg.lstsq` (for dense Jacobian matrices) or by iterative procedure `scipy.sparse.linalg.lsmr` (for dense and sparse matrices, or Jacobian being LinearOperator). The second option allows to solve very large problems (up to couple of millions of residuals on a regular PC), provided the Jacobian matrix is sufficiently sparse. But note that dogbox is not very good for solving problems with large number of constraints, because of variables exclusion-inclusion on each iteration (a required number of function evaluations might be high or accuracy of a solution will be poor), thus its large-scale usage is probably limited to unconstrained problems. References ---------- .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg Approach for Unconstrained and Bound Constrained Nonlinear Optimization", WSEAS International Conference on Applied Mathematics, Corfu, Greece, 2004. .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition". """ import numpy as np from numpy.linalg import lstsq, norm from scipy.sparse.linalg import LinearOperator, aslinearoperator, lsmr from scipy.optimize import OptimizeResult from .common import ( step_size_to_bound, in_bounds, update_tr_radius, evaluate_quadratic, build_quadratic_1d, minimize_quadratic_1d, compute_grad, compute_jac_scale, check_termination, scale_for_robust_loss_function, print_header_nonlinear, print_iteration_nonlinear) def lsmr_operator(Jop, d, active_set): """Compute LinearOperator to use in LSMR by dogbox algorithm. `active_set` mask is used to excluded active variables from computations of matrix-vector products. """ m, n = Jop.shape def matvec(x): x_free = x.ravel().copy() x_free[active_set] = 0 return Jop.matvec(x * d) def rmatvec(x): r = d * Jop.rmatvec(x) r[active_set] = 0 return r return LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec, dtype=float) def find_intersection(x, tr_bounds, lb, ub): """Find intersection of trust-region bounds and initial bounds. Returns ------- lb_total, ub_total : ndarray with shape of x Lower and upper bounds of the intersection region. orig_l, orig_u : ndarray of bool with shape of x True means that an original bound is taken as a corresponding bound in the intersection region. 
tr_l, tr_u : ndarray of bool with shape of x True means that a trust-region bound is taken as a corresponding bound in the intersection region. """ lb_centered = lb - x ub_centered = ub - x lb_total = np.maximum(lb_centered, -tr_bounds) ub_total = np.minimum(ub_centered, tr_bounds) orig_l = np.equal(lb_total, lb_centered) orig_u = np.equal(ub_total, ub_centered) tr_l = np.equal(lb_total, -tr_bounds) tr_u = np.equal(ub_total, tr_bounds) return lb_total, ub_total, orig_l, orig_u, tr_l, tr_u def dogleg_step(x, newton_step, g, a, b, tr_bounds, lb, ub): """Find dogleg step in a rectangular region. Returns ------- step : ndarray, shape (n,) Computed dogleg step. bound_hits : ndarray of int, shape (n,) Each component shows whether a corresponding variable hits the initial bound after the step is taken: * 0 - a variable doesn't hit the bound. * -1 - lower bound is hit. * 1 - upper bound is hit. tr_hit : bool Whether the step hit the boundary of the trust-region. """ lb_total, ub_total, orig_l, orig_u, tr_l, tr_u = find_intersection( x, tr_bounds, lb, ub ) bound_hits = np.zeros_like(x, dtype=int) if in_bounds(newton_step, lb_total, ub_total): return newton_step, bound_hits, False to_bounds, _ = step_size_to_bound(np.zeros_like(x), -g, lb_total, ub_total) # The classical dogleg algorithm would check if Cauchy step fits into # the bounds, and just return it constrained version if not. But in a # rectangular trust region it makes sense to try to improve constrained # Cauchy step too. Thus, we don't distinguish these two cases. cauchy_step = -minimize_quadratic_1d(a, b, 0, to_bounds)[0] * g step_diff = newton_step - cauchy_step step_size, hits = step_size_to_bound(cauchy_step, step_diff, lb_total, ub_total) bound_hits[(hits < 0) & orig_l] = -1 bound_hits[(hits > 0) & orig_u] = 1 tr_hit = np.any((hits < 0) & tr_l | (hits > 0) & tr_u) return cauchy_step + step_size * step_diff, bound_hits, tr_hit def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose): f = f0 f_true = f.copy() nfev = 1 J = J0 njev = 1 if loss_function is not None: rho = loss_function(f) cost = 0.5 * np.sum(rho[0]) J, f = scale_for_robust_loss_function(J, f, rho) else: cost = 0.5 * np.dot(f, f) g = compute_grad(J, f) jac_scale = isinstance(x_scale, str) and x_scale == 'jac' if jac_scale: scale, scale_inv = compute_jac_scale(J) else: scale, scale_inv = x_scale, 1 / x_scale Delta = norm(x0 * scale_inv, ord=np.inf) if Delta == 0: Delta = 1.0 on_bound = np.zeros_like(x0, dtype=int) on_bound[np.equal(x0, lb)] = -1 on_bound[np.equal(x0, ub)] = 1 x = x0 step = np.empty_like(x0) if max_nfev is None: max_nfev = x0.size * 100 termination_status = None iteration = 0 step_norm = None actual_reduction = None if verbose == 2: print_header_nonlinear() while True: active_set = on_bound * g < 0 free_set = ~active_set g_free = g[free_set] g_full = g.copy() g[active_set] = 0 g_norm = norm(g, ord=np.inf) if g_norm < gtol: termination_status = 1 if verbose == 2: print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, step_norm, g_norm) if termination_status is not None or nfev == max_nfev: break x_free = x[free_set] lb_free = lb[free_set] ub_free = ub[free_set] scale_free = scale[free_set] # Compute (Gauss-)Newton and build quadratic model for Cauchy step. if tr_solver == 'exact': J_free = J[:, free_set] newton_step = lstsq(J_free, -f, rcond=-1)[0] # Coefficients for the quadratic model along the anti-gradient. 
a, b = build_quadratic_1d(J_free, g_free, -g_free) elif tr_solver == 'lsmr': Jop = aslinearoperator(J) # We compute lsmr step in scaled variables and then # transform back to normal variables, if lsmr would give exact lsq # solution, this would be equivalent to not doing any # transformations, but from experience it's better this way. # We pass active_set to make computations as if we selected # the free subset of J columns, but without actually doing any # slicing, which is expensive for sparse matrices and impossible # for LinearOperator. lsmr_op = lsmr_operator(Jop, scale, active_set) newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set] newton_step *= scale_free # Components of g for active variables were zeroed, so this call # is correct and equivalent to using J_free and g_free. a, b = build_quadratic_1d(Jop, g, -g) actual_reduction = -1.0 while actual_reduction <= 0 and nfev < max_nfev: tr_bounds = Delta * scale_free step_free, on_bound_free, tr_hit = dogleg_step( x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free) step.fill(0.0) step[free_set] = step_free if tr_solver == 'exact': predicted_reduction = -evaluate_quadratic(J_free, g_free, step_free) elif tr_solver == 'lsmr': predicted_reduction = -evaluate_quadratic(Jop, g, step) # gh11403 ensure that solution is fully within bounds. x_new = np.clip(x + step, lb, ub) f_new = fun(x_new) nfev += 1 step_h_norm = norm(step * scale_inv, ord=np.inf) if not np.all(np.isfinite(f_new)): Delta = 0.25 * step_h_norm continue # Usual trust-region step quality estimation. if loss_function is not None: cost_new = loss_function(f_new, cost_only=True) else: cost_new = 0.5 * np.dot(f_new, f_new) actual_reduction = cost - cost_new Delta, ratio = update_tr_radius( Delta, actual_reduction, predicted_reduction, step_h_norm, tr_hit ) step_norm = norm(step) termination_status = check_termination( actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) if termination_status is not None: break if actual_reduction > 0: on_bound[free_set] = on_bound_free x = x_new # Set variables exactly at the boundary. mask = on_bound == -1 x[mask] = lb[mask] mask = on_bound == 1 x[mask] = ub[mask] f = f_new f_true = f.copy() cost = cost_new J = jac(x, f) njev += 1 if loss_function is not None: rho = loss_function(f) J, f = scale_for_robust_loss_function(J, f, rho) g = compute_grad(J, f) if jac_scale: scale, scale_inv = compute_jac_scale(J, scale_inv) else: step_norm = 0 actual_reduction = 0 iteration += 1 if termination_status is None: termination_status = 0 return OptimizeResult( x=x, cost=cost, fun=f_true, jac=J, grad=g_full, optimality=g_norm, active_mask=on_bound, nfev=nfev, njev=njev, status=termination_status)
11,682
34.189759
79
py
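A hedged sketch of driving this solver through the public least_squares interface, reusing the bounded Rosenbrock residuals from the least_squares docstring below; iterate counts and the exact solution may differ slightly from the 'trf' method.

import numpy as np
from scipy.optimize import least_squares

def fun_rosenbrock(x):
    return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])

res = least_squares(fun_rosenbrock, np.array([2.0, 2.0]),
                    bounds=([-np.inf, 1.5], np.inf), method='dogbox')
# x[1] is expected to finish on its 1.5 bound; active_mask reports which
# bounds are active (-1 lower, 1 upper) per the dogbox bookkeeping above
print(res.x, res.active_mask)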
scipy
scipy-main/scipy/optimize/_lsq/least_squares.py
"""Generic interface for least-squares minimization.""" from warnings import warn import numpy as np from numpy.linalg import norm from scipy.sparse import issparse from scipy.sparse.linalg import LinearOperator from scipy.optimize import _minpack, OptimizeResult from scipy.optimize._numdiff import approx_derivative, group_columns from scipy.optimize._minimize import Bounds from .trf import trf from .dogbox import dogbox from .common import EPS, in_bounds, make_strictly_feasible TERMINATION_MESSAGES = { -1: "Improper input parameters status returned from `leastsq`", 0: "The maximum number of function evaluations is exceeded.", 1: "`gtol` termination condition is satisfied.", 2: "`ftol` termination condition is satisfied.", 3: "`xtol` termination condition is satisfied.", 4: "Both `ftol` and `xtol` termination conditions are satisfied." } FROM_MINPACK_TO_COMMON = { 0: -1, # Improper input parameters from MINPACK. 1: 2, 2: 3, 3: 4, 4: 1, 5: 0 # There are 6, 7, 8 for too small tolerance parameters, # but we guard against it by checking ftol, xtol, gtol beforehand. } def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step): n = x0.size if diff_step is None: epsfcn = EPS else: epsfcn = diff_step**2 # Compute MINPACK's `diag`, which is inverse of our `x_scale` and # ``x_scale='jac'`` corresponds to ``diag=None``. if isinstance(x_scale, str) and x_scale == 'jac': diag = None else: diag = 1 / x_scale full_output = True col_deriv = False factor = 100.0 if jac is None: if max_nfev is None: # n squared to account for Jacobian evaluations. max_nfev = 100 * n * (n + 1) x, info, status = _minpack._lmdif( fun, x0, (), full_output, ftol, xtol, gtol, max_nfev, epsfcn, factor, diag) else: if max_nfev is None: max_nfev = 100 * n x, info, status = _minpack._lmder( fun, jac, x0, (), full_output, col_deriv, ftol, xtol, gtol, max_nfev, factor, diag) f = info['fvec'] if callable(jac): J = jac(x) else: J = np.atleast_2d(approx_derivative(fun, x)) cost = 0.5 * np.dot(f, f) g = J.T.dot(f) g_norm = norm(g, ord=np.inf) nfev = info['nfev'] njev = info.get('njev', None) status = FROM_MINPACK_TO_COMMON[status] active_mask = np.zeros_like(x0, dtype=int) return OptimizeResult( x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm, active_mask=active_mask, nfev=nfev, njev=njev, status=status) def prepare_bounds(bounds, n): lb, ub = (np.asarray(b, dtype=float) for b in bounds) if lb.ndim == 0: lb = np.resize(lb, n) if ub.ndim == 0: ub = np.resize(ub, n) return lb, ub def check_tolerance(ftol, xtol, gtol, method): def check(tol, name): if tol is None: tol = 0 elif tol < EPS: warn("Setting `{}` below the machine epsilon ({:.2e}) effectively " "disables the corresponding termination condition." 
.format(name, EPS)) return tol ftol = check(ftol, "ftol") xtol = check(xtol, "xtol") gtol = check(gtol, "gtol") if method == "lm" and (ftol < EPS or xtol < EPS or gtol < EPS): raise ValueError("All tolerances must be higher than machine epsilon " "({:.2e}) for method 'lm'.".format(EPS)) elif ftol < EPS and xtol < EPS and gtol < EPS: raise ValueError("At least one of the tolerances must be higher than " "machine epsilon ({:.2e}).".format(EPS)) return ftol, xtol, gtol def check_x_scale(x_scale, x0): if isinstance(x_scale, str) and x_scale == 'jac': return x_scale try: x_scale = np.asarray(x_scale, dtype=float) valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0) except (ValueError, TypeError): valid = False if not valid: raise ValueError("`x_scale` must be 'jac' or array_like with " "positive numbers.") if x_scale.ndim == 0: x_scale = np.resize(x_scale, x0.shape) if x_scale.shape != x0.shape: raise ValueError("Inconsistent shapes between `x_scale` and `x0`.") return x_scale def check_jac_sparsity(jac_sparsity, m, n): if jac_sparsity is None: return None if not issparse(jac_sparsity): jac_sparsity = np.atleast_2d(jac_sparsity) if jac_sparsity.shape != (m, n): raise ValueError("`jac_sparsity` has wrong shape.") return jac_sparsity, group_columns(jac_sparsity) # Loss functions. def huber(z, rho, cost_only): mask = z <= 1 rho[0, mask] = z[mask] rho[0, ~mask] = 2 * z[~mask]**0.5 - 1 if cost_only: return rho[1, mask] = 1 rho[1, ~mask] = z[~mask]**-0.5 rho[2, mask] = 0 rho[2, ~mask] = -0.5 * z[~mask]**-1.5 def soft_l1(z, rho, cost_only): t = 1 + z rho[0] = 2 * (t**0.5 - 1) if cost_only: return rho[1] = t**-0.5 rho[2] = -0.5 * t**-1.5 def cauchy(z, rho, cost_only): rho[0] = np.log1p(z) if cost_only: return t = 1 + z rho[1] = 1 / t rho[2] = -1 / t**2 def arctan(z, rho, cost_only): rho[0] = np.arctan(z) if cost_only: return t = 1 + z**2 rho[1] = 1 / t rho[2] = -2 * z / t**2 IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1, cauchy=cauchy, arctan=arctan) def construct_loss_function(m, loss, f_scale): if loss == 'linear': return None if not callable(loss): loss = IMPLEMENTED_LOSSES[loss] rho = np.empty((3, m)) def loss_function(f, cost_only=False): z = (f / f_scale) ** 2 loss(z, rho, cost_only=cost_only) if cost_only: return 0.5 * f_scale ** 2 * np.sum(rho[0]) rho[0] *= f_scale ** 2 rho[2] /= f_scale ** 2 return rho else: def loss_function(f, cost_only=False): z = (f / f_scale) ** 2 rho = loss(z) if cost_only: return 0.5 * f_scale ** 2 * np.sum(rho[0]) rho[0] *= f_scale ** 2 rho[2] /= f_scale ** 2 return rho return loss_function def least_squares( fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf', ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}): """Solve a nonlinear least-squares problem with bounds on the variables. Given the residuals f(x) (an m-D real function of n real variables) and the loss function rho(s) (a scalar function), `least_squares` finds a local minimum of the cost function F(x):: minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1) subject to lb <= x <= ub The purpose of the loss function rho(s) is to reduce the influence of outliers on the solution. Parameters ---------- fun : callable Function which computes the vector of residuals, with the signature ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with respect to its first argument. 
The argument ``x`` passed to this function is an ndarray of shape (n,) (never a scalar, even for n=1). It must allocate and return a 1-D array_like of shape (m,) or a scalar. If the argument ``x`` is complex or the function ``fun`` returns complex residuals, it must be wrapped in a real function of real arguments, as shown at the end of the Examples section. x0 : array_like with shape (n,) or float Initial guess on independent variables. If float, it will be treated as a 1-D array with one element. jac : {'2-point', '3-point', 'cs', callable}, optional Method of computing the Jacobian matrix (an m-by-n matrix, where element (i, j) is the partial derivative of f[i] with respect to x[j]). The keywords select a finite difference scheme for numerical estimation. The scheme '3-point' is more accurate, but requires twice as many operations as '2-point' (default). The scheme 'cs' uses complex steps, and while potentially the most accurate, it is applicable only when `fun` correctly handles complex inputs and can be analytically continued to the complex plane. Method 'lm' always uses the '2-point' scheme. If callable, it is used as ``jac(x, *args, **kwargs)`` and should return a good approximation (or the exact value) for the Jacobian as an array_like (np.atleast_2d is applied), a sparse matrix (csr_matrix preferred for performance) or a `scipy.sparse.linalg.LinearOperator`. bounds : 2-tuple of array_like or `Bounds`, optional There are two ways to specify bounds: 1. Instance of `Bounds` class 2. Lower and upper bounds on independent variables. Defaults to no bounds. Each array must match the size of `x0` or be a scalar, in the latter case a bound will be the same for all variables. Use ``np.inf`` with an appropriate sign to disable bounds on all or some variables. method : {'trf', 'dogbox', 'lm'}, optional Algorithm to perform minimization. * 'trf' : Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. * 'dogbox' : dogleg algorithm with rectangular trust regions, typical use case is small problems with bounds. Not recommended for problems with rank-deficient Jacobian. * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK. Doesn't handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. Default is 'trf'. See Notes for more information. ftol : float or None, optional Tolerance for termination by the change of the cost function. Default is 1e-8. The optimization process is stopped when ``dF < ftol * F``, and there was an adequate agreement between a local quadratic model and the true model in the last step. If None and 'method' is not 'lm', the termination by this condition is disabled. If 'method' is 'lm', this tolerance must be higher than machine epsilon. xtol : float or None, optional Tolerance for termination by the change of the independent variables. Default is 1e-8. The exact condition depends on the `method` used: * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``. * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is a trust-region radius and ``xs`` is the value of ``x`` scaled according to `x_scale` parameter (see below). If None and 'method' is not 'lm', the termination by this condition is disabled. If 'method' is 'lm', this tolerance must be higher than machine epsilon. gtol : float or None, optional Tolerance for termination by the norm of the gradient. Default is 1e-8. 
The exact condition depends on a `method` used: * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where ``g_scaled`` is the value of the gradient scaled to account for the presence of the bounds [STIR]_. * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where ``g_free`` is the gradient with respect to the variables which are not in the optimal state on the boundary. * For 'lm' : the maximum absolute value of the cosine of angles between columns of the Jacobian and the residual vector is less than `gtol`, or the residual vector is zero. If None and 'method' is not 'lm', the termination by this condition is disabled. If 'method' is 'lm', this tolerance must be higher than machine epsilon. x_scale : array_like or 'jac', optional Characteristic scale of each variable. Setting `x_scale` is equivalent to reformulating the problem in scaled variables ``xs = x / x_scale``. An alternative view is that the size of a trust region along jth dimension is proportional to ``x_scale[j]``. Improved convergence may be achieved by setting `x_scale` such that a step of a given size along any of the scaled variables has a similar effect on the cost function. If set to 'jac', the scale is iteratively updated using the inverse norms of the columns of the Jacobian matrix (as described in [JJMore]_). loss : str or callable, optional Determines the loss function. The following keyword values are allowed: * 'linear' (default) : ``rho(z) = z``. Gives a standard least-squares problem. * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth approximation of l1 (absolute value) loss. Usually a good choice for robust least squares. * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works similarly to 'soft_l1'. * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers influence, but may cause difficulties in optimization process. * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on a single residual, has properties similar to 'cauchy'. If callable, it must take a 1-D ndarray ``z=f**2`` and return an array_like with shape (3, m) where row 0 contains function values, row 1 contains first derivatives and row 2 contains second derivatives. Method 'lm' supports only 'linear' loss. f_scale : float, optional Value of soft margin between inlier and outlier residuals, default is 1.0. The loss function is evaluated as follows ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`, and ``rho`` is determined by `loss` parameter. This parameter has no effect with ``loss='linear'``, but for other `loss` values it is of crucial importance. max_nfev : None or int, optional Maximum number of function evaluations before the termination. If None (default), the value is chosen automatically: * For 'trf' and 'dogbox' : 100 * n. * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1) otherwise (because 'lm' counts function calls in Jacobian estimation). diff_step : None or array_like, optional Determines the relative step size for the finite difference approximation of the Jacobian. The actual step is computed as ``x * diff_step``. If None (default), then `diff_step` is taken to be a conventional "optimal" power of machine epsilon for the finite difference scheme used [NR]_. tr_solver : {None, 'exact', 'lsmr'}, optional Method for solving trust-region subproblems, relevant only for 'trf' and 'dogbox' methods. * 'exact' is suitable for not very large problems with dense Jacobian matrices. 
The computational complexity per iteration is comparable to a singular value decomposition of the Jacobian matrix. * 'lsmr' is suitable for problems with sparse and large Jacobian matrices. It uses the iterative procedure `scipy.sparse.linalg.lsmr` for finding a solution of a linear least-squares problem and only requires matrix-vector product evaluations. If None (default), the solver is chosen based on the type of Jacobian returned on the first iteration. tr_options : dict, optional Keyword options passed to trust-region solver. * ``tr_solver='exact'``: `tr_options` are ignored. * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. Additionally, ``method='trf'`` supports 'regularize' option (bool, default is True), which adds a regularization term to the normal equation, which improves convergence if the Jacobian is rank-deficient [Byrd]_ (eq. 3.4). jac_sparsity : {None, array_like, sparse matrix}, optional Defines the sparsity structure of the Jacobian matrix for finite difference estimation, its shape must be (m, n). If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations [Curtis]_. A zero entry means that a corresponding element in the Jacobian is identically zero. If provided, forces the use of 'lsmr' trust-region solver. If None (default), then dense differencing will be used. Has no effect for 'lm' method. verbose : {0, 1, 2}, optional Level of algorithm's verbosity: * 0 (default) : work silently. * 1 : display a termination report. * 2 : display progress during iterations (not supported by 'lm' method). args, kwargs : tuple and dict, optional Additional arguments passed to `fun` and `jac`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)`` and the same for `jac`. Returns ------- result : OptimizeResult `OptimizeResult` with the following fields defined: x : ndarray, shape (n,) Solution found. cost : float Value of the cost function at the solution. fun : ndarray, shape (m,) Vector of residuals at the solution. jac : ndarray, sparse matrix or LinearOperator, shape (m, n) Modified Jacobian matrix at the solution, in the sense that J^T J is a Gauss-Newton approximation of the Hessian of the cost function. The type is the same as the one used by the algorithm. grad : ndarray, shape (m,) Gradient of the cost function at the solution. optimality : float First-order optimality measure. In unconstrained problems, it is always the uniform norm of the gradient. In constrained problems, it is the quantity which was compared with `gtol` during iterations. active_mask : ndarray of int, shape (n,) Each component shows whether a corresponding constraint is active (that is, whether a variable is at the bound): * 0 : a constraint is not active. * -1 : a lower bound is active. * 1 : an upper bound is active. Might be somewhat arbitrary for 'trf' method as it generates a sequence of strictly feasible iterates and `active_mask` is determined within a tolerance threshold. nfev : int Number of function evaluations done. Methods 'trf' and 'dogbox' do not count function calls for numerical Jacobian approximation, as opposed to 'lm' method. njev : int or None Number of Jacobian evaluations done. If numerical Jacobian approximation is used in 'lm' method, it is set to None. status : int The reason for algorithm termination: * -1 : improper input parameters status returned from MINPACK. * 0 : the maximum number of function evaluations is exceeded. * 1 : `gtol` termination condition is satisfied. 
* 2 : `ftol` termination condition is satisfied. * 3 : `xtol` termination condition is satisfied. * 4 : Both `ftol` and `xtol` termination conditions are satisfied. message : str Verbal description of the termination reason. success : bool True if one of the convergence criteria is satisfied (`status` > 0). See Also -------- leastsq : A legacy wrapper for the MINPACK implementation of the Levenberg-Marquardt algorithm. curve_fit : Least-squares minimization applied to a curve-fitting problem. Notes ----- Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares algorithms implemented in MINPACK (lmder, lmdif). It runs the Levenberg-Marquardt algorithm formulated as a trust-region type algorithm. The implementation is based on the paper [JJMore]_; it is very robust and efficient, with a lot of smart tricks. It should be your first choice for unconstrained problems. Note that it doesn't support bounds. Also, it doesn't work when m < n. Method 'trf' (Trust Region Reflective) is motivated by the process of solving a system of equations, which constitute the first-order optimality condition for a bound-constrained minimization problem as formulated in [STIR]_. The algorithm iteratively solves trust-region subproblems augmented by a special diagonal quadratic term and with trust-region shape determined by the distance from the bounds and the direction of the gradient. These enhancements help to avoid making steps directly into bounds and to explore the whole space of variables efficiently. To further improve convergence, the algorithm considers search directions reflected from the bounds. To obey theoretical requirements, the algorithm keeps iterates strictly feasible. With dense Jacobians trust-region subproblems are solved by an exact method very similar to the one described in [JJMore]_ (and implemented in MINPACK). The difference from the MINPACK implementation is that a singular value decomposition of a Jacobian matrix is done once per iteration, instead of a QR decomposition and a series of Givens rotation eliminations. For large sparse Jacobians a 2-D subspace approach of solving trust-region subproblems is used [STIR]_, [Byrd]_. The subspace is spanned by a scaled gradient and an approximate Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no constraints are imposed the algorithm is very similar to MINPACK and has generally comparable performance. The algorithm works quite robustly on unbounded and bounded problems, thus it is chosen as the default algorithm. Method 'dogbox' operates in a trust-region framework, but considers rectangular trust regions as opposed to conventional ellipsoids [Voglis]_. The intersection of a current trust region and initial bounds is again rectangular, so on each iteration a quadratic minimization problem subject to bound constraints is solved approximately by Powell's dogleg method [NumOpt]_. The required Gauss-Newton step can be computed exactly for dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large sparse Jacobians. The algorithm is likely to exhibit slow convergence when the rank of the Jacobian is less than the number of variables. The algorithm often outperforms 'trf' in bounded problems with a small number of variables. Robust loss functions are implemented as described in [BA]_. The idea is to modify a residual vector and a Jacobian matrix on each iteration such that the computed gradient and Gauss-Newton Hessian approximation match the true gradient and Hessian approximation of the cost function. 
Then the algorithm proceeds in a normal way, i.e., robust loss functions are implemented as a simple wrapper over standard least-squares algorithms. .. versionadded:: 0.17.0 References ---------- .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific Computing. 3rd edition", Sec. 5.7. .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate solution of the trust region problem by minimization over two-dimensional subspaces", Math. Programming, 40, pp. 247-263, 1988. .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13, pp. 117-120, 1974. .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region Dogleg Approach for Unconstrained and Bound Constrained Nonlinear Optimization", WSEAS International Conference on Applied Mathematics, Corfu, Greece, 2004. .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition", Chapter 4. .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis", Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, pp. 298-372, 1999. Examples -------- In this example we find a minimum of the Rosenbrock function without bounds on independent variables. >>> import numpy as np >>> def fun_rosenbrock(x): ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) Notice that we only provide the vector of the residuals. The algorithm constructs the cost function as a sum of squares of the residuals, which gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``. >>> from scipy.optimize import least_squares >>> x0_rosenbrock = np.array([2, 2]) >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock) >>> res_1.x array([ 1., 1.]) >>> res_1.cost 9.8669242910846867e-30 >>> res_1.optimality 8.8928864934219529e-14 We now constrain the variables, in such a way that the previous solution becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``. We also provide the analytic Jacobian: >>> def jac_rosenbrock(x): ... return np.array([ ... [-20 * x[0], 10], ... [-1, 0]]) Putting this all together, we see that the new solution lies on the bound: >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock, ... bounds=([-np.inf, 1.5], np.inf)) >>> res_2.x array([ 1.22437075, 1.5 ]) >>> res_2.cost 0.025213093946805685 >>> res_2.optimality 1.5885401433157753e-07 Now we solve a system of equations (i.e., the cost function should be zero at a minimum) for a Broyden tridiagonal vector-valued function of 100000 variables: >>> def fun_broyden(x): ... f = (3 - x) * x + 1 ... f[1:] -= x[:-1] ... f[:-1] -= 2 * x[1:] ... return f The corresponding Jacobian matrix is sparse. We tell the algorithm to estimate it by finite differences and provide the sparsity structure of Jacobian to significantly speed up this process. >>> from scipy.sparse import lil_matrix >>> def sparsity_broyden(n): ... 
sparsity = lil_matrix((n, n), dtype=int) ... i = np.arange(n) ... sparsity[i, i] = 1 ... i = np.arange(1, n) ... sparsity[i, i - 1] = 1 ... i = np.arange(n - 1) ... sparsity[i, i + 1] = 1 ... return sparsity ... >>> n = 100000 >>> x0_broyden = -np.ones(n) ... >>> res_3 = least_squares(fun_broyden, x0_broyden, ... jac_sparsity=sparsity_broyden(n)) >>> res_3.cost 4.5687069299604613e-23 >>> res_3.optimality 1.1650454296851518e-11 Let's also solve a curve fitting problem using robust loss function to take care of outliers in the data. Define the model function as ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an observation and a, b, c are parameters to estimate. First, define the function which generates the data with noise and outliers, define the model parameters, and generate data: >>> from numpy.random import default_rng >>> rng = default_rng() >>> def gen_data(t, a, b, c, noise=0., n_outliers=0, seed=None): ... rng = default_rng(seed) ... ... y = a + b * np.exp(t * c) ... ... error = noise * rng.standard_normal(t.size) ... outliers = rng.integers(0, t.size, n_outliers) ... error[outliers] *= 10 ... ... return y + error ... >>> a = 0.5 >>> b = 2.0 >>> c = -1 >>> t_min = 0 >>> t_max = 10 >>> n_points = 15 ... >>> t_train = np.linspace(t_min, t_max, n_points) >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3) Define function for computing residuals and initial estimate of parameters. >>> def fun(x, t, y): ... return x[0] + x[1] * np.exp(x[2] * t) - y ... >>> x0 = np.array([1.0, 1.0, 0.0]) Compute a standard least-squares solution: >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train)) Now compute two solutions with two different robust loss functions. The parameter `f_scale` is set to 0.1, meaning that inlier residuals should not significantly exceed 0.1 (the noise level used). >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1, ... args=(t_train, y_train)) >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1, ... args=(t_train, y_train)) And, finally, plot all the curves. We see that by selecting an appropriate `loss` we can get estimates close to optimal even in the presence of strong outliers. But keep in mind that generally it is recommended to try 'soft_l1' or 'huber' losses first (if at all necessary) as the other two options may cause difficulties in optimization process. >>> t_test = np.linspace(t_min, t_max, n_points * 10) >>> y_true = gen_data(t_test, a, b, c) >>> y_lsq = gen_data(t_test, *res_lsq.x) >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x) >>> y_log = gen_data(t_test, *res_log.x) ... >>> import matplotlib.pyplot as plt >>> plt.plot(t_train, y_train, 'o') >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true') >>> plt.plot(t_test, y_lsq, label='linear loss') >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss') >>> plt.plot(t_test, y_log, label='cauchy loss') >>> plt.xlabel("t") >>> plt.ylabel("y") >>> plt.legend() >>> plt.show() In the next example, we show how complex-valued residual functions of complex variables can be optimized with ``least_squares()``. Consider the following function: >>> def f(z): ... return z - (0.5 + 0.5j) We wrap it into a function of real variables that returns real residuals by simply handling the real and imaginary parts as independent variables: >>> def f_wrap(x): ... fx = f(x[0] + 1j*x[1]) ... 
return np.array([fx.real, fx.imag]) Thus, instead of the original m-D complex function of n complex variables we optimize a 2m-D real function of 2n real variables: >>> from scipy.optimize import least_squares >>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1])) >>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j >>> z (0.49999999999925893+0.49999999999925893j) """ if method not in ['trf', 'dogbox', 'lm']: raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.") if jac not in ['2-point', '3-point', 'cs'] and not callable(jac): raise ValueError("`jac` must be '2-point', '3-point', 'cs' or " "callable.") if tr_solver not in [None, 'exact', 'lsmr']: raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.") if loss not in IMPLEMENTED_LOSSES and not callable(loss): raise ValueError("`loss` must be one of {} or a callable." .format(IMPLEMENTED_LOSSES.keys())) if method == 'lm' and loss != 'linear': raise ValueError("method='lm' supports only 'linear' loss function.") if verbose not in [0, 1, 2]: raise ValueError("`verbose` must be in [0, 1, 2].") if max_nfev is not None and max_nfev <= 0: raise ValueError("`max_nfev` must be None or positive integer.") if np.iscomplexobj(x0): raise ValueError("`x0` must be real.") x0 = np.atleast_1d(x0).astype(float) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") if isinstance(bounds, Bounds): lb, ub = bounds.lb, bounds.ub bounds = (lb, ub) else: if len(bounds) == 2: lb, ub = prepare_bounds(bounds, x0.shape[0]) else: raise ValueError("`bounds` must contain 2 elements.") if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)): raise ValueError("Method 'lm' doesn't support bounds.") if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if np.any(lb >= ub): raise ValueError("Each lower bound must be strictly less than each " "upper bound.") if not in_bounds(x0, lb, ub): raise ValueError("`x0` is infeasible.") x_scale = check_x_scale(x_scale, x0) ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol, method) def fun_wrapped(x): return np.atleast_1d(fun(x, *args, **kwargs)) if method == 'trf': x0 = make_strictly_feasible(x0, lb, ub) f0 = fun_wrapped(x0) if f0.ndim != 1: raise ValueError("`fun` must return at most 1-d array_like. " "f0.shape: {}".format(f0.shape)) if not np.all(np.isfinite(f0)): raise ValueError("Residuals are not finite in the initial point.") n = x0.size m = f0.size if method == 'lm' and m < n: raise ValueError("Method 'lm' doesn't work when the number of " "residuals is less than the number of variables.") loss_function = construct_loss_function(m, loss, f_scale) if callable(loss): rho = loss_function(f0) if rho.shape != (3, m): raise ValueError("The return value of `loss` callable has wrong " "shape.") initial_cost = 0.5 * np.sum(rho[0]) elif loss_function is not None: initial_cost = loss_function(f0, cost_only=True) else: initial_cost = 0.5 * np.dot(f0, f0) if callable(jac): J0 = jac(x0, *args, **kwargs) if issparse(J0): J0 = J0.tocsr() def jac_wrapped(x, _=None): return jac(x, *args, **kwargs).tocsr() elif isinstance(J0, LinearOperator): def jac_wrapped(x, _=None): return jac(x, *args, **kwargs) else: J0 = np.atleast_2d(J0) def jac_wrapped(x, _=None): return np.atleast_2d(jac(x, *args, **kwargs)) else: # Estimate Jacobian by finite differences. 
if method == 'lm': if jac_sparsity is not None: raise ValueError("method='lm' does not support " "`jac_sparsity`.") if jac != '2-point': warn("jac='{}' works equivalently to '2-point' " "for method='lm'.".format(jac)) J0 = jac_wrapped = None else: if jac_sparsity is not None and tr_solver == 'exact': raise ValueError("tr_solver='exact' is incompatible " "with `jac_sparsity`.") jac_sparsity = check_jac_sparsity(jac_sparsity, m, n) def jac_wrapped(x, f): J = approx_derivative(fun, x, rel_step=diff_step, method=jac, f0=f, bounds=bounds, args=args, kwargs=kwargs, sparsity=jac_sparsity) if J.ndim != 2: # J is guaranteed not sparse. J = np.atleast_2d(J) return J J0 = jac_wrapped(x0, f0) if J0 is not None: if J0.shape != (m, n): raise ValueError( "The return value of `jac` has wrong shape: expected {}, " "actual {}.".format((m, n), J0.shape)) if not isinstance(J0, np.ndarray): if method == 'lm': raise ValueError("method='lm' works only with dense " "Jacobian matrices.") if tr_solver == 'exact': raise ValueError( "tr_solver='exact' works only with dense " "Jacobian matrices.") jac_scale = isinstance(x_scale, str) and x_scale == 'jac' if isinstance(J0, LinearOperator) and jac_scale: raise ValueError("x_scale='jac' can't be used when `jac` " "returns LinearOperator.") if tr_solver is None: if isinstance(J0, np.ndarray): tr_solver = 'exact' else: tr_solver = 'lsmr' if method == 'lm': result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol, max_nfev, x_scale, diff_step) elif method == 'trf': result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options.copy(), verbose) elif method == 'dogbox': if tr_solver == 'lsmr' and 'regularize' in tr_options: warn("The keyword 'regularize' in `tr_options` is not relevant " "for 'dogbox' method.") tr_options = tr_options.copy() del tr_options['regularize'] result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose) result.message = TERMINATION_MESSAGES[result.status] result.success = result.status > 0 if verbose >= 1: print(result.message) print("Function evaluations {}, initial cost {:.4e}, final cost " "{:.4e}, first-order optimality {:.2e}." .format(result.nfev, initial_cost, result.cost, result.optimality)) return result
39,522
39.998963
87
py
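Hypothetical usage sketch for the callable `loss` contract that the validation code in least_squares enforces (the shape ``(3, m)`` check on the value returned for ``z = f**2``). The function name ``arctan_like_loss`` and the toy residuals are illustrative assumptions, not part of the file above.

import numpy as np
from scipy.optimize import least_squares

def arctan_like_loss(z):
    # z holds the squared (scaled) residuals; the rows must be
    # rho(z), rho'(z) and rho''(z), giving an array of shape (3, m).
    rho = np.empty((3, z.size))
    rho[0] = np.arctan(z)
    rho[1] = 1.0 / (1.0 + z**2)
    rho[2] = -2.0 * z / (1.0 + z**2)**2
    return rho

def residuals(x):
    # Toy Rosenbrock-style residual vector (m = 2, n = 2).
    return np.array([10.0 * (x[1] - x[0]**2), 1.0 - x[0]])

res = least_squares(residuals, np.array([2.0, 2.0]), loss=arctan_like_loss)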
scipy
scipy-main/scipy/optimize/_lsq/setup.py
def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('_lsq', parent_package, top_path) config.add_extension('givens_elimination', sources=['givens_elimination.c']) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
415
33.666667
60
py
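A rough sketch of how a parent package would pull in this sub-configuration under numpy.distutils, which SciPy has since deprecated in favour of Meson; the parent ``configuration`` function below is an assumption for illustration, not the actual scipy/optimize/setup.py.

from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    # Registering '_lsq' as a subpackage makes numpy.distutils pick up
    # the _lsq/setup.py shown above and build its C extension.
    config = Configuration('optimize', parent_package, top_path)
    config.add_subpackage('_lsq')
    return config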
scipy
scipy-main/scipy/optimize/_lsq/lsq_linear.py
"""Linear least squares with bound constraints on independent variables.""" import numpy as np from numpy.linalg import norm from scipy.sparse import issparse, csr_matrix from scipy.sparse.linalg import LinearOperator, lsmr from scipy.optimize import OptimizeResult from scipy.optimize._minimize import Bounds from .common import in_bounds, compute_grad from .trf_linear import trf_linear from .bvls import bvls def prepare_bounds(bounds, n): if len(bounds) != 2: raise ValueError("`bounds` must contain 2 elements.") lb, ub = (np.asarray(b, dtype=float) for b in bounds) if lb.ndim == 0: lb = np.resize(lb, n) if ub.ndim == 0: ub = np.resize(ub, n) return lb, ub TERMINATION_MESSAGES = { -1: "The algorithm was not able to make progress on the last iteration.", 0: "The maximum number of iterations is exceeded.", 1: "The first-order optimality measure is less than `tol`.", 2: "The relative change of the cost function is less than `tol`.", 3: "The unconstrained solution is optimal." } def lsq_linear(A, b, bounds=(-np.inf, np.inf), method='trf', tol=1e-10, lsq_solver=None, lsmr_tol=None, max_iter=None, verbose=0, *, lsmr_maxiter=None,): r"""Solve a linear least-squares problem with bounds on the variables. Given a m-by-n design matrix A and a target vector b with m elements, `lsq_linear` solves the following optimization problem:: minimize 0.5 * ||A x - b||**2 subject to lb <= x <= ub This optimization problem is convex, hence a found minimum (if iterations have converged) is guaranteed to be global. Parameters ---------- A : array_like, sparse matrix of LinearOperator, shape (m, n) Design matrix. Can be `scipy.sparse.linalg.LinearOperator`. b : array_like, shape (m,) Target vector. bounds : 2-tuple of array_like or `Bounds`, optional Lower and upper bounds on parameters. Defaults to no bounds. There are two ways to specify the bounds: - Instance of `Bounds` class. - 2-tuple of array_like: Each element of the tuple must be either an array with the length equal to the number of parameters, or a scalar (in which case the bound is taken to be the same for all parameters). Use ``np.inf`` with an appropriate sign to disable bounds on all or some parameters. method : 'trf' or 'bvls', optional Method to perform minimization. * 'trf' : Trust Region Reflective algorithm adapted for a linear least-squares problem. This is an interior-point-like method and the required number of iterations is weakly correlated with the number of variables. * 'bvls' : Bounded-variable least-squares algorithm. This is an active set method, which requires the number of iterations comparable to the number of variables. Can't be used when `A` is sparse or LinearOperator. Default is 'trf'. tol : float, optional Tolerance parameter. The algorithm terminates if a relative change of the cost function is less than `tol` on the last iteration. Additionally, the first-order optimality measure is considered: * ``method='trf'`` terminates if the uniform norm of the gradient, scaled to account for the presence of the bounds, is less than `tol`. * ``method='bvls'`` terminates if Karush-Kuhn-Tucker conditions are satisfied within `tol` tolerance. lsq_solver : {None, 'exact', 'lsmr'}, optional Method of solving unbounded least-squares problems throughout iterations: * 'exact' : Use dense QR or SVD decomposition approach. Can't be used when `A` is sparse or LinearOperator. * 'lsmr' : Use `scipy.sparse.linalg.lsmr` iterative procedure which requires only matrix-vector product evaluations. Can't be used with ``method='bvls'``. 
If None (default), the solver is chosen based on type of `A`. lsmr_tol : None, float or 'auto', optional Tolerance parameters 'atol' and 'btol' for `scipy.sparse.linalg.lsmr` If None (default), it is set to ``1e-2 * tol``. If 'auto', the tolerance will be adjusted based on the optimality of the current iterate, which can speed up the optimization process, but is not always reliable. max_iter : None or int, optional Maximum number of iterations before termination. If None (default), it is set to 100 for ``method='trf'`` or to the number of variables for ``method='bvls'`` (not counting iterations for 'bvls' initialization). verbose : {0, 1, 2}, optional Level of algorithm's verbosity: * 0 : work silently (default). * 1 : display a termination report. * 2 : display progress during iterations. lsmr_maxiter : None or int, optional Maximum number of iterations for the lsmr least squares solver, if it is used (by setting ``lsq_solver='lsmr'``). If None (default), it uses lsmr's default of ``min(m, n)`` where ``m`` and ``n`` are the number of rows and columns of `A`, respectively. Has no effect if ``lsq_solver='exact'``. Returns ------- OptimizeResult with the following fields defined: x : ndarray, shape (n,) Solution found. cost : float Value of the cost function at the solution. fun : ndarray, shape (m,) Vector of residuals at the solution. optimality : float First-order optimality measure. The exact meaning depends on `method`, refer to the description of `tol` parameter. active_mask : ndarray of int, shape (n,) Each component shows whether a corresponding constraint is active (that is, whether a variable is at the bound): * 0 : a constraint is not active. * -1 : a lower bound is active. * 1 : an upper bound is active. Might be somewhat arbitrary for the `trf` method as it generates a sequence of strictly feasible iterates and active_mask is determined within a tolerance threshold. unbounded_sol : tuple Unbounded least squares solution tuple returned by the least squares solver (set with `lsq_solver` option). If `lsq_solver` is not set or is set to ``'exact'``, the tuple contains an ndarray of shape (n,) with the unbounded solution, an ndarray with the sum of squared residuals, an int with the rank of `A`, and an ndarray with the singular values of `A` (see NumPy's ``linalg.lstsq`` for more information). If `lsq_solver` is set to ``'lsmr'``, the tuple contains an ndarray of shape (n,) with the unbounded solution, an int with the exit code, an int with the number of iterations, and five floats with various norms and the condition number of `A` (see SciPy's ``sparse.linalg.lsmr`` for more information). This output can be useful for determining the convergence of the least squares solver, particularly the iterative ``'lsmr'`` solver. The unbounded least squares problem is to minimize ``0.5 * ||A x - b||**2``. nit : int Number of iterations. Zero if the unconstrained solution is optimal. status : int Reason for algorithm termination: * -1 : the algorithm was not able to make progress on the last iteration. * 0 : the maximum number of iterations is exceeded. * 1 : the first-order optimality measure is less than `tol`. * 2 : the relative change of the cost function is less than `tol`. * 3 : the unconstrained solution is optimal. message : str Verbal description of the termination reason. success : bool True if one of the convergence criteria is satisfied (`status` > 0). See Also -------- nnls : Linear least squares with non-negativity constraint. 
least_squares : Nonlinear least squares with bounds on the variables. Notes ----- The algorithm first computes the unconstrained least-squares solution by `numpy.linalg.lstsq` or `scipy.sparse.linalg.lsmr` depending on `lsq_solver`. This solution is returned as optimal if it lies within the bounds. Method 'trf' runs the adaptation of the algorithm described in [STIR]_ for a linear least-squares problem. The iterations are essentially the same as in the nonlinear least-squares algorithm, but as the quadratic function model is always accurate, we don't need to track or modify the radius of a trust region. The line search (backtracking) is used as a safety net when a selected step does not decrease the cost function. A more detailed description of the algorithm can be found in `scipy.optimize.least_squares`. Method 'bvls' runs a Python implementation of the algorithm described in [BVLS]_. The algorithm maintains active and free sets of variables; on each iteration it chooses a new variable to move from the active set to the free set and then solves the unconstrained least-squares problem on free variables. This algorithm is guaranteed to give an accurate solution eventually, but may require up to n iterations for a problem with n variables. Additionally, an ad-hoc initialization procedure is implemented that determines which variables to set free or active initially. It takes some number of iterations before actual BVLS starts, but can significantly reduce the number of further iterations. References ---------- .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. .. [BVLS] P. B. Stark and R. L. Parker, "Bounded-Variable Least-Squares: an Algorithm and Applications", Computational Statistics, 10, 129-141, 1995. Examples -------- In this example, a problem with a large sparse matrix and bounds on the variables is solved. >>> import numpy as np >>> from scipy.sparse import rand >>> from scipy.optimize import lsq_linear >>> rng = np.random.default_rng() ... >>> m = 20000 >>> n = 10000 ... >>> A = rand(m, n, density=1e-4, random_state=rng) >>> b = rng.standard_normal(m) ... >>> lb = rng.standard_normal(n) >>> ub = lb + 1 ... >>> res = lsq_linear(A, b, bounds=(lb, ub), lsmr_tol='auto', verbose=1) # may vary The relative change of the cost function is less than `tol`. Number of iterations 16, initial cost 1.5039e+04, final cost 1.1112e+04, first-order optimality 4.66e-08. """ if method not in ['trf', 'bvls']: raise ValueError("`method` must be 'trf' or 'bvls'") if lsq_solver not in [None, 'exact', 'lsmr']: raise ValueError("`lsq_solver` must be None, 'exact' or 'lsmr'.") if verbose not in [0, 1, 2]: raise ValueError("`verbose` must be in [0, 1, 2].") if issparse(A): A = csr_matrix(A) elif not isinstance(A, LinearOperator): A = np.atleast_2d(np.asarray(A)) if method == 'bvls': if lsq_solver == 'lsmr': raise ValueError("method='bvls' can't be used with " "lsq_solver='lsmr'") if not isinstance(A, np.ndarray): raise ValueError("method='bvls' can't be used with `A` being " "sparse or LinearOperator.") if lsq_solver is None: if isinstance(A, np.ndarray): lsq_solver = 'exact' else: lsq_solver = 'lsmr' elif lsq_solver == 'exact' and not isinstance(A, np.ndarray): raise ValueError("`exact` solver can't be used when `A` is " "sparse or LinearOperator.") if len(A.shape) != 2: # No ndim for LinearOperator. 
raise ValueError("`A` must have at most 2 dimensions.") if max_iter is not None and max_iter <= 0: raise ValueError("`max_iter` must be None or positive integer.") m, n = A.shape b = np.atleast_1d(b) if b.ndim != 1: raise ValueError("`b` must have at most 1 dimension.") if b.size != m: raise ValueError("Inconsistent shapes between `A` and `b`.") if isinstance(bounds, Bounds): lb = bounds.lb ub = bounds.ub else: lb, ub = prepare_bounds(bounds, n) if lb.shape != (n,) and ub.shape != (n,): raise ValueError("Bounds have wrong shape.") if np.any(lb >= ub): raise ValueError("Each lower bound must be strictly less than each " "upper bound.") if lsmr_maxiter is not None and lsmr_maxiter < 1: raise ValueError("`lsmr_maxiter` must be None or positive integer.") if not ((isinstance(lsmr_tol, float) and lsmr_tol > 0) or lsmr_tol in ('auto', None)): raise ValueError("`lsmr_tol` must be None, 'auto', or positive float.") if lsq_solver == 'exact': unbd_lsq = np.linalg.lstsq(A, b, rcond=-1) elif lsq_solver == 'lsmr': first_lsmr_tol = lsmr_tol # tol of first call to lsmr if lsmr_tol is None or lsmr_tol == 'auto': first_lsmr_tol = 1e-2 * tol # default if lsmr_tol not defined unbd_lsq = lsmr(A, b, maxiter=lsmr_maxiter, atol=first_lsmr_tol, btol=first_lsmr_tol) x_lsq = unbd_lsq[0] # extract the solution from the least squares solver if in_bounds(x_lsq, lb, ub): r = A @ x_lsq - b cost = 0.5 * np.dot(r, r) termination_status = 3 termination_message = TERMINATION_MESSAGES[termination_status] g = compute_grad(A, r) g_norm = norm(g, ord=np.inf) if verbose > 0: print(termination_message) print("Final cost {:.4e}, first-order optimality {:.2e}" .format(cost, g_norm)) return OptimizeResult( x=x_lsq, fun=r, cost=cost, optimality=g_norm, active_mask=np.zeros(n), unbounded_sol=unbd_lsq, nit=0, status=termination_status, message=termination_message, success=True) if method == 'trf': res = trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, max_iter, verbose, lsmr_maxiter=lsmr_maxiter) elif method == 'bvls': res = bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose) res.unbounded_sol = unbd_lsq res.message = TERMINATION_MESSAGES[res.status] res.success = res.status > 0 if verbose > 0: print(res.message) print("Number of iterations {}, initial cost {:.4e}, " "final cost {:.4e}, first-order optimality {:.2e}." .format(res.nit, res.initial_cost, res.cost, res.optimality)) del res.initial_cost return res
15,217
40.922865
79
py
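A minimal, hypothetical call sketch for `lsq_linear` exercising the `Bounds`-instance branch and ``method='bvls'`` seen in the code above; the matrix and vector values are made up.

import numpy as np
from scipy.optimize import lsq_linear, Bounds

A = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])
b = np.array([1.0, 0.5, -0.5])
# A Bounds instance is unpacked to (lb, ub) inside lsq_linear; 'bvls'
# requires a dense A and uses the 'exact' unbounded solver.
res = lsq_linear(A, b, bounds=Bounds([0.0, 0.0], [1.0, 1.0]), method='bvls')
print(res.x, res.status)
print(res.unbounded_sol[0])   # solution of the unconstrained problem from lstsq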
scipy
scipy-main/scipy/optimize/_lsq/trf.py
"""Trust Region Reflective algorithm for least-squares optimization. The algorithm is based on ideas from paper [STIR]_. The main idea is to account for the presence of the bounds by appropriate scaling of the variables (or, equivalently, changing a trust-region shape). Let's introduce a vector v: | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf | 1, otherwise where g is the gradient of a cost function and lb, ub are the bounds. Its components are distances to the bounds at which the anti-gradient points (if this distance is finite). Define a scaling matrix D = diag(v**0.5). First-order optimality conditions can be stated as D^2 g(x) = 0. Meaning that components of the gradient should be zero for strictly interior variables, and components must point inside the feasible region for variables on the bound. Now consider this system of equations as a new optimization problem. If the point x is strictly interior (not on the bound), then the left-hand side is differentiable and the Newton step for it satisfies (D^2 H + diag(g) Jv) p = -D^2 g where H is the Hessian matrix (or its J^T J approximation in least squares), Jv is the Jacobian matrix of v with components -1, 1 or 0, such that all elements of matrix C = diag(g) Jv are non-negative. Introduce the change of the variables x = D x_h (_h would be "hat" in LaTeX). In the new variables, we have a Newton step satisfying B_h p_h = -g_h, where B_h = D H D + C, g_h = D g. In least squares B_h = J_h^T J_h, where J_h = J D. Note that J_h and g_h are proper Jacobian and gradient with respect to "hat" variables. To guarantee global convergence we formulate a trust-region problem based on the Newton step in the new variables: 0.5 * p_h^T B_h p + g_h^T p_h -> min, ||p_h|| <= Delta In the original space B = H + D^{-1} C D^{-1}, and the equivalent trust-region problem is 0.5 * p^T B p + g^T p -> min, ||D^{-1} p|| <= Delta Here, the meaning of the matrix D becomes more clear: it alters the shape of a trust-region, such that large steps towards the bounds are not allowed. In the implementation, the trust-region problem is solved in "hat" space, but handling of the bounds is done in the original space (see below and read the code). The introduction of the matrix D doesn't allow to ignore bounds, the algorithm must keep iterates strictly feasible (to satisfy aforementioned differentiability), the parameter theta controls step back from the boundary (see the code for details). The algorithm does another important trick. If the trust-region solution doesn't fit into the bounds, then a reflected (from a firstly encountered bound) search direction is considered. For motivation and analysis refer to [STIR]_ paper (and other papers of the authors). In practice, it doesn't need a lot of justifications, the algorithm simply chooses the best step among three: a constrained trust-region step, a reflected step and a constrained Cauchy step (a minimizer along -g_h in "hat" space, or -D^2 g in the original space). Another feature is that a trust-region radius control strategy is modified to account for appearance of the diagonal C matrix (called diag_h in the code). Note that all described peculiarities are completely gone as we consider problems without bounds (the algorithm becomes a standard trust-region type algorithm very similar to ones implemented in MINPACK). The implementation supports two methods of solving the trust-region problem. 
The first, called 'exact', applies SVD on Jacobian and then solves the problem very accurately using the algorithm described in [JJMore]_. It is not applicable to large problem. The second, called 'lsmr', uses the 2-D subspace approach (sometimes called "indefinite dogleg"), where the problem is solved in a subspace spanned by the gradient and the approximate Gauss-Newton step found by ``scipy.sparse.linalg.lsmr``. A 2-D trust-region problem is reformulated as a 4th order algebraic equation and solved very accurately by ``numpy.roots``. The subspace approach allows to solve very large problems (up to couple of millions of residuals on a regular PC), provided the Jacobian matrix is sufficiently sparse. References ---------- .. [STIR] Branch, M.A., T.F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. .. [JJMore] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture """ import numpy as np from numpy.linalg import norm from scipy.linalg import svd, qr from scipy.sparse.linalg import lsmr from scipy.optimize import OptimizeResult from .common import ( step_size_to_bound, find_active_constraints, in_bounds, make_strictly_feasible, intersect_trust_region, solve_lsq_trust_region, solve_trust_region_2d, minimize_quadratic_1d, build_quadratic_1d, evaluate_quadratic, right_multiplied_operator, regularized_lsq_operator, CL_scaling_vector, compute_grad, compute_jac_scale, check_termination, update_tr_radius, scale_for_robust_loss_function, print_header_nonlinear, print_iteration_nonlinear) def trf(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose): # For efficiency, it makes sense to run the simplified version of the # algorithm when no bounds are imposed. We decided to write the two # separate functions. It violates the DRY principle, but the individual # functions are kept the most readable. if np.all(lb == -np.inf) and np.all(ub == np.inf): return trf_no_bounds( fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose) else: return trf_bounds( fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose) def select_step(x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta): """Select the best step according to Trust Region Reflective algorithm.""" if in_bounds(x + p, lb, ub): p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) return p, p_h, -p_value p_stride, hits = step_size_to_bound(x, p, lb, ub) # Compute the reflected direction. r_h = np.copy(p_h) r_h[hits.astype(bool)] *= -1 r = d * r_h # Restrict trust-region step, such that it hits the bound. p *= p_stride p_h *= p_stride x_on_bound = x + p # Reflected direction will cross first either feasible region or trust # region boundary. _, to_tr = intersect_trust_region(p_h, r_h, Delta) to_bound, _ = step_size_to_bound(x_on_bound, r, lb, ub) # Find lower and upper bounds on a step size along the reflected # direction, considering the strict feasibility requirement. There is no # single correct way to do that, the chosen approach seems to work best # on test problems. 
r_stride = min(to_bound, to_tr) if r_stride > 0: r_stride_l = (1 - theta) * p_stride / r_stride if r_stride == to_bound: r_stride_u = theta * to_bound else: r_stride_u = to_tr else: r_stride_l = 0 r_stride_u = -1 # Check if reflection step is available. if r_stride_l <= r_stride_u: a, b, c = build_quadratic_1d(J_h, g_h, r_h, s0=p_h, diag=diag_h) r_stride, r_value = minimize_quadratic_1d( a, b, r_stride_l, r_stride_u, c=c) r_h *= r_stride r_h += p_h r = r_h * d else: r_value = np.inf # Now correct p_h to make it strictly interior. p *= theta p_h *= theta p_value = evaluate_quadratic(J_h, g_h, p_h, diag=diag_h) ag_h = -g_h ag = d * ag_h to_tr = Delta / norm(ag_h) to_bound, _ = step_size_to_bound(x, ag, lb, ub) if to_bound < to_tr: ag_stride = theta * to_bound else: ag_stride = to_tr a, b = build_quadratic_1d(J_h, g_h, ag_h, diag=diag_h) ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride) ag_h *= ag_stride ag *= ag_stride if p_value < r_value and p_value < ag_value: return p, p_h, -p_value elif r_value < p_value and r_value < ag_value: return r, r_h, -r_value else: return ag, ag_h, -ag_value def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose): x = x0.copy() f = f0 f_true = f.copy() nfev = 1 J = J0 njev = 1 m, n = J.shape if loss_function is not None: rho = loss_function(f) cost = 0.5 * np.sum(rho[0]) J, f = scale_for_robust_loss_function(J, f, rho) else: cost = 0.5 * np.dot(f, f) g = compute_grad(J, f) jac_scale = isinstance(x_scale, str) and x_scale == 'jac' if jac_scale: scale, scale_inv = compute_jac_scale(J) else: scale, scale_inv = x_scale, 1 / x_scale v, dv = CL_scaling_vector(x, g, lb, ub) v[dv != 0] *= scale_inv[dv != 0] Delta = norm(x0 * scale_inv / v**0.5) if Delta == 0: Delta = 1.0 g_norm = norm(g * v, ord=np.inf) f_augmented = np.zeros(m + n) if tr_solver == 'exact': J_augmented = np.empty((m + n, n)) elif tr_solver == 'lsmr': reg_term = 0.0 regularize = tr_options.pop('regularize', True) if max_nfev is None: max_nfev = x0.size * 100 alpha = 0.0 # "Levenberg-Marquardt" parameter termination_status = None iteration = 0 step_norm = None actual_reduction = None if verbose == 2: print_header_nonlinear() while True: v, dv = CL_scaling_vector(x, g, lb, ub) g_norm = norm(g * v, ord=np.inf) if g_norm < gtol: termination_status = 1 if verbose == 2: print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, step_norm, g_norm) if termination_status is not None or nfev == max_nfev: break # Now compute variables in "hat" space. Here, we also account for # scaling introduced by `x_scale` parameter. This part is a bit tricky, # you have to write down the formulas and see how the trust-region # problem is formulated when the two types of scaling are applied. # The idea is that first we apply `x_scale` and then apply Coleman-Li # approach in the new variables. # v is recomputed in the variables after applying `x_scale`, note that # components which were identically 1 not affected. v[dv != 0] *= scale_inv[dv != 0] # Here, we apply two types of scaling. d = v**0.5 * scale # C = diag(g * scale) Jv diag_h = g * dv * scale # After all this has been done, we continue normally. # "hat" gradient. g_h = d * g f_augmented[:m] = f if tr_solver == 'exact': J_augmented[:m] = J * d J_h = J_augmented[:m] # Memory view. 
J_augmented[m:] = np.diag(diag_h**0.5) U, s, V = svd(J_augmented, full_matrices=False) V = V.T uf = U.T.dot(f_augmented) elif tr_solver == 'lsmr': J_h = right_multiplied_operator(J, d) if regularize: a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h) to_tr = Delta / norm(g_h) ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] reg_term = -ag_value / Delta**2 lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5) gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0] S = np.vstack((g_h, gn_h)).T S, _ = qr(S, mode='economic') JS = J_h.dot(S) # LinearOperator does dot too. B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S) g_S = S.T.dot(g_h) # theta controls step back step ratio from the bounds. theta = max(0.995, 1 - g_norm) actual_reduction = -1 while actual_reduction <= 0 and nfev < max_nfev: if tr_solver == 'exact': p_h, alpha, n_iter = solve_lsq_trust_region( n, m, uf, s, V, Delta, initial_alpha=alpha) elif tr_solver == 'lsmr': p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) p_h = S.dot(p_S) p = d * p_h # Trust-region solution in the original space. step, step_h, predicted_reduction = select_step( x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta) x_new = make_strictly_feasible(x + step, lb, ub, rstep=0) f_new = fun(x_new) nfev += 1 step_h_norm = norm(step_h) if not np.all(np.isfinite(f_new)): Delta = 0.25 * step_h_norm continue # Usual trust-region step quality estimation. if loss_function is not None: cost_new = loss_function(f_new, cost_only=True) else: cost_new = 0.5 * np.dot(f_new, f_new) actual_reduction = cost - cost_new Delta_new, ratio = update_tr_radius( Delta, actual_reduction, predicted_reduction, step_h_norm, step_h_norm > 0.95 * Delta) step_norm = norm(step) termination_status = check_termination( actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) if termination_status is not None: break alpha *= Delta / Delta_new Delta = Delta_new if actual_reduction > 0: x = x_new f = f_new f_true = f.copy() cost = cost_new J = jac(x, f) njev += 1 if loss_function is not None: rho = loss_function(f) J, f = scale_for_robust_loss_function(J, f, rho) g = compute_grad(J, f) if jac_scale: scale, scale_inv = compute_jac_scale(J, scale_inv) else: step_norm = 0 actual_reduction = 0 iteration += 1 if termination_status is None: termination_status = 0 active_mask = find_active_constraints(x, lb, ub, rtol=xtol) return OptimizeResult( x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, active_mask=active_mask, nfev=nfev, njev=njev, status=termination_status) def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose): x = x0.copy() f = f0 f_true = f.copy() nfev = 1 J = J0 njev = 1 m, n = J.shape if loss_function is not None: rho = loss_function(f) cost = 0.5 * np.sum(rho[0]) J, f = scale_for_robust_loss_function(J, f, rho) else: cost = 0.5 * np.dot(f, f) g = compute_grad(J, f) jac_scale = isinstance(x_scale, str) and x_scale == 'jac' if jac_scale: scale, scale_inv = compute_jac_scale(J) else: scale, scale_inv = x_scale, 1 / x_scale Delta = norm(x0 * scale_inv) if Delta == 0: Delta = 1.0 if tr_solver == 'lsmr': reg_term = 0 damp = tr_options.pop('damp', 0.0) regularize = tr_options.pop('regularize', True) if max_nfev is None: max_nfev = x0.size * 100 alpha = 0.0 # "Levenberg-Marquardt" parameter termination_status = None iteration = 0 step_norm = None actual_reduction = None if verbose == 2: print_header_nonlinear() while True: g_norm = norm(g, ord=np.inf) if g_norm < gtol: termination_status = 1 if 
verbose == 2: print_iteration_nonlinear(iteration, nfev, cost, actual_reduction, step_norm, g_norm) if termination_status is not None or nfev == max_nfev: break d = scale g_h = d * g if tr_solver == 'exact': J_h = J * d U, s, V = svd(J_h, full_matrices=False) V = V.T uf = U.T.dot(f) elif tr_solver == 'lsmr': J_h = right_multiplied_operator(J, d) if regularize: a, b = build_quadratic_1d(J_h, g_h, -g_h) to_tr = Delta / norm(g_h) ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1] reg_term = -ag_value / Delta**2 damp_full = (damp**2 + reg_term)**0.5 gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0] S = np.vstack((g_h, gn_h)).T S, _ = qr(S, mode='economic') JS = J_h.dot(S) B_S = np.dot(JS.T, JS) g_S = S.T.dot(g_h) actual_reduction = -1 while actual_reduction <= 0 and nfev < max_nfev: if tr_solver == 'exact': step_h, alpha, n_iter = solve_lsq_trust_region( n, m, uf, s, V, Delta, initial_alpha=alpha) elif tr_solver == 'lsmr': p_S, _ = solve_trust_region_2d(B_S, g_S, Delta) step_h = S.dot(p_S) predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h) step = d * step_h x_new = x + step f_new = fun(x_new) nfev += 1 step_h_norm = norm(step_h) if not np.all(np.isfinite(f_new)): Delta = 0.25 * step_h_norm continue # Usual trust-region step quality estimation. if loss_function is not None: cost_new = loss_function(f_new, cost_only=True) else: cost_new = 0.5 * np.dot(f_new, f_new) actual_reduction = cost - cost_new Delta_new, ratio = update_tr_radius( Delta, actual_reduction, predicted_reduction, step_h_norm, step_h_norm > 0.95 * Delta) step_norm = norm(step) termination_status = check_termination( actual_reduction, cost, step_norm, norm(x), ratio, ftol, xtol) if termination_status is not None: break alpha *= Delta / Delta_new Delta = Delta_new if actual_reduction > 0: x = x_new f = f_new f_true = f.copy() cost = cost_new J = jac(x, f) njev += 1 if loss_function is not None: rho = loss_function(f) J, f = scale_for_robust_loss_function(J, f, rho) g = compute_grad(J, f) if jac_scale: scale, scale_inv = compute_jac_scale(J, scale_inv) else: step_norm = 0 actual_reduction = 0 iteration += 1 if termination_status is None: termination_status = 0 active_mask = np.zeros_like(x) return OptimizeResult( x=x, cost=cost, fun=f_true, jac=J, grad=g, optimality=g_norm, active_mask=active_mask, nfev=nfev, njev=njev, status=termination_status)
19,477
33.720143
83
py
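A small numeric sketch, with made-up values, of the Coleman-Li scaling vector v described in the trf module docstring (the same logic as `CL_scaling_vector` in common.py): v[i] is the distance to the bound that the anti-gradient points at, or 1 when that distance is infinite.

import numpy as np

x = np.array([0.5, 2.0, -1.0])
g = np.array([-3.0, 4.0, 0.2])           # gradient of the cost function
lb = np.array([0.0, 0.5, -np.inf])
ub = np.array([1.0, np.inf, np.inf])

v = np.ones_like(x)
mask = (g < 0) & np.isfinite(ub)
v[mask] = ub[mask] - x[mask]             # anti-gradient points towards a finite ub
mask = (g > 0) & np.isfinite(lb)
v[mask] = x[mask] - lb[mask]             # anti-gradient points towards a finite lb

d = v**0.5                               # diagonal of D; the "hat" step is p = d * p_h
print(v)                                 # [0.5 1.5 1. ]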
scipy
scipy-main/scipy/optimize/_lsq/bvls.py
"""Bounded-variable least-squares algorithm.""" import numpy as np from numpy.linalg import norm, lstsq from scipy.optimize import OptimizeResult from .common import print_header_linear, print_iteration_linear def compute_kkt_optimality(g, on_bound): """Compute the maximum violation of KKT conditions.""" g_kkt = g * on_bound free_set = on_bound == 0 g_kkt[free_set] = np.abs(g[free_set]) return np.max(g_kkt) def bvls(A, b, x_lsq, lb, ub, tol, max_iter, verbose, rcond=None): m, n = A.shape x = x_lsq.copy() on_bound = np.zeros(n) mask = x <= lb x[mask] = lb[mask] on_bound[mask] = -1 mask = x >= ub x[mask] = ub[mask] on_bound[mask] = 1 free_set = on_bound == 0 active_set = ~free_set free_set, = np.nonzero(free_set) r = A.dot(x) - b cost = 0.5 * np.dot(r, r) initial_cost = cost g = A.T.dot(r) cost_change = None step_norm = None iteration = 0 if verbose == 2: print_header_linear() # This is the initialization loop. The requirement is that the # least-squares solution on free variables is feasible before BVLS starts. # One possible initialization is to set all variables to lower or upper # bounds, but many iterations may be required from this state later on. # The implemented ad-hoc procedure which intuitively should give a better # initial state: find the least-squares solution on current free variables, # if its feasible then stop, otherwise, set violating variables to # corresponding bounds and continue on the reduced set of free variables. while free_set.size > 0: if verbose == 2: optimality = compute_kkt_optimality(g, on_bound) print_iteration_linear(iteration, cost, cost_change, step_norm, optimality) iteration += 1 x_free_old = x[free_set].copy() A_free = A[:, free_set] b_free = b - A.dot(x * active_set) z = lstsq(A_free, b_free, rcond=rcond)[0] lbv = z < lb[free_set] ubv = z > ub[free_set] v = lbv | ubv if np.any(lbv): ind = free_set[lbv] x[ind] = lb[ind] active_set[ind] = True on_bound[ind] = -1 if np.any(ubv): ind = free_set[ubv] x[ind] = ub[ind] active_set[ind] = True on_bound[ind] = 1 ind = free_set[~v] x[ind] = z[~v] r = A.dot(x) - b cost_new = 0.5 * np.dot(r, r) cost_change = cost - cost_new cost = cost_new g = A.T.dot(r) step_norm = norm(x[free_set] - x_free_old) if np.any(v): free_set = free_set[~v] else: break if max_iter is None: max_iter = n max_iter += iteration termination_status = None # Main BVLS loop. 
optimality = compute_kkt_optimality(g, on_bound) for iteration in range(iteration, max_iter): # BVLS Loop A if verbose == 2: print_iteration_linear(iteration, cost, cost_change, step_norm, optimality) if optimality < tol: termination_status = 1 if termination_status is not None: break move_to_free = np.argmax(g * on_bound) on_bound[move_to_free] = 0 while True: # BVLS Loop B free_set = on_bound == 0 active_set = ~free_set free_set, = np.nonzero(free_set) x_free = x[free_set] x_free_old = x_free.copy() lb_free = lb[free_set] ub_free = ub[free_set] A_free = A[:, free_set] b_free = b - A.dot(x * active_set) z = lstsq(A_free, b_free, rcond=rcond)[0] lbv, = np.nonzero(z < lb_free) ubv, = np.nonzero(z > ub_free) v = np.hstack((lbv, ubv)) if v.size > 0: alphas = np.hstack(( lb_free[lbv] - x_free[lbv], ub_free[ubv] - x_free[ubv])) / (z[v] - x_free[v]) i = np.argmin(alphas) i_free = v[i] alpha = alphas[i] x_free *= 1 - alpha x_free += alpha * z x[free_set] = x_free if i < lbv.size: on_bound[free_set[i_free]] = -1 else: on_bound[free_set[i_free]] = 1 else: x_free = z x[free_set] = x_free break step_norm = norm(x_free - x_free_old) r = A.dot(x) - b cost_new = 0.5 * np.dot(r, r) cost_change = cost - cost_new if cost_change < tol * cost: termination_status = 2 cost = cost_new g = A.T.dot(r) optimality = compute_kkt_optimality(g, on_bound) if termination_status is None: termination_status = 0 return OptimizeResult( x=x, fun=r, cost=cost, optimality=optimality, active_mask=on_bound, nit=iteration + 1, status=termination_status, initial_cost=initial_cost)
5,195
27.23913
79
py
scipy
scipy-main/scipy/optimize/_lsq/common.py
"""Functions used by least-squares algorithms.""" from math import copysign import numpy as np from numpy.linalg import norm from scipy.linalg import cho_factor, cho_solve, LinAlgError from scipy.sparse import issparse from scipy.sparse.linalg import LinearOperator, aslinearoperator EPS = np.finfo(float).eps # Functions related to a trust-region problem. def intersect_trust_region(x, s, Delta): """Find the intersection of a line with the boundary of a trust region. This function solves the quadratic equation with respect to t ||(x + s*t)||**2 = Delta**2. Returns ------- t_neg, t_pos : tuple of float Negative and positive roots. Raises ------ ValueError If `s` is zero or `x` is not within the trust region. """ a = np.dot(s, s) if a == 0: raise ValueError("`s` is zero.") b = np.dot(x, s) c = np.dot(x, x) - Delta**2 if c > 0: raise ValueError("`x` is not within the trust region.") d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant. # Computations below avoid loss of significance, see "Numerical Recipes". q = -(b + copysign(d, b)) t1 = q / a t2 = c / q if t1 < t2: return t1, t2 else: return t2, t1 def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None, rtol=0.01, max_iter=10): """Solve a trust-region problem arising in least-squares minimization. This function implements a method described by J. J. More [1]_ and used in MINPACK, but it relies on a single SVD of Jacobian instead of series of Cholesky decompositions. Before running this function, compute: ``U, s, VT = svd(J, full_matrices=False)``. Parameters ---------- n : int Number of variables. m : int Number of residuals. uf : ndarray Computed as U.T.dot(f). s : ndarray Singular values of J. V : ndarray Transpose of VT. Delta : float Radius of a trust region. initial_alpha : float, optional Initial guess for alpha, which might be available from a previous iteration. If None, determined automatically. rtol : float, optional Stopping tolerance for the root-finding procedure. Namely, the solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``. max_iter : int, optional Maximum allowed number of iterations for the root-finding procedure. Returns ------- p : ndarray, shape (n,) Found solution of a trust-region problem. alpha : float Positive value such that (J.T*J + alpha*I)*p = -J.T*f. Sometimes called Levenberg-Marquardt parameter. n_iter : int Number of iterations made by root-finding procedure. Zero means that Gauss-Newton step was selected as the solution. References ---------- .. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. """ def phi_and_derivative(alpha, suf, s, Delta): """Function of which to find zero. It is defined as "norm of regularized (by alpha) least-squares solution minus `Delta`". Refer to [1]_. """ denom = s**2 + alpha p_norm = norm(suf / denom) phi = p_norm - Delta phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm return phi, phi_prime suf = s * uf # Check if J has full rank and try Gauss-Newton step. 
if m >= n: threshold = EPS * m * s[0] full_rank = s[-1] > threshold else: full_rank = False if full_rank: p = -V.dot(uf / s) if norm(p) <= Delta: return p, 0.0, 0 alpha_upper = norm(suf) / Delta if full_rank: phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta) alpha_lower = -phi / phi_prime else: alpha_lower = 0.0 if initial_alpha is None or not full_rank and initial_alpha == 0: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) else: alpha = initial_alpha for it in range(max_iter): if alpha < alpha_lower or alpha > alpha_upper: alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5) phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta) if phi < 0: alpha_upper = alpha ratio = phi / phi_prime alpha_lower = max(alpha_lower, alpha - ratio) alpha -= (phi + Delta) * ratio / Delta if np.abs(phi) < rtol * Delta: break p = -V.dot(suf / (s**2 + alpha)) # Make the norm of p equal to Delta, p is changed only slightly during # this. It is done to prevent p lie outside the trust region (which can # cause problems later). p *= Delta / norm(p) return p, alpha, it + 1 def solve_trust_region_2d(B, g, Delta): """Solve a general trust-region problem in 2 dimensions. The problem is reformulated as a 4th order algebraic equation, the solution of which is found by numpy.roots. Parameters ---------- B : ndarray, shape (2, 2) Symmetric matrix, defines a quadratic term of the function. g : ndarray, shape (2,) Defines a linear term of the function. Delta : float Radius of a trust region. Returns ------- p : ndarray, shape (2,) Found solution. newton_step : bool Whether the returned solution is the Newton step which lies within the trust region. """ try: R, lower = cho_factor(B) p = -cho_solve((R, lower), g) if np.dot(p, p) <= Delta**2: return p, True except LinAlgError: pass a = B[0, 0] * Delta**2 b = B[0, 1] * Delta**2 c = B[1, 1] * Delta**2 d = g[0] * Delta f = g[1] * Delta coeffs = np.array( [-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d]) t = np.roots(coeffs) # Can handle leading zeros. t = np.real(t[np.isreal(t)]) p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2))) value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p) i = np.argmin(value) p = p[:, i] return p, False def update_tr_radius(Delta, actual_reduction, predicted_reduction, step_norm, bound_hit): """Update the radius of a trust region based on the cost reduction. Returns ------- Delta : float New radius. ratio : float Ratio between actual and predicted reductions. """ if predicted_reduction > 0: ratio = actual_reduction / predicted_reduction elif predicted_reduction == actual_reduction == 0: ratio = 1 else: ratio = 0 if ratio < 0.25: Delta = 0.25 * step_norm elif ratio > 0.75 and bound_hit: Delta *= 2.0 return Delta, ratio # Construction and minimization of quadratic functions. def build_quadratic_1d(J, g, s, diag=None, s0=None): """Parameterize a multivariate quadratic function along a line. The resulting univariate quadratic function is given as follows:: f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) + g.T * (s0 + s*t) Parameters ---------- J : ndarray, sparse matrix or LinearOperator shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (n,) Direction vector of a line. diag : None or ndarray with shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. s0 : None or ndarray with shape (n,), optional Initial point. If None, assumed to be 0. 
Returns ------- a : float Coefficient for t**2. b : float Coefficient for t. c : float Free term. Returned only if `s0` is provided. """ v = J.dot(s) a = np.dot(v, v) if diag is not None: a += np.dot(s * diag, s) a *= 0.5 b = np.dot(g, s) if s0 is not None: u = J.dot(s0) b += np.dot(u, v) c = 0.5 * np.dot(u, u) + np.dot(g, s0) if diag is not None: b += np.dot(s0 * diag, s) c += 0.5 * np.dot(s0 * diag, s0) return a, b, c else: return a, b def minimize_quadratic_1d(a, b, lb, ub, c=0): """Minimize a 1-D quadratic function subject to bounds. The free term `c` is 0 by default. Bounds must be finite. Returns ------- t : float Minimum point. y : float Minimum value. """ t = [lb, ub] if a != 0: extremum = -0.5 * b / a if lb < extremum < ub: t.append(extremum) t = np.asarray(t) y = t * (a * t + b) + c min_index = np.argmin(y) return t[min_index], y[min_index] def evaluate_quadratic(J, g, s, diag=None): """Compute values of a quadratic function arising in least squares. The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s. Parameters ---------- J : ndarray, sparse matrix or LinearOperator, shape (m, n) Jacobian matrix, affects the quadratic term. g : ndarray, shape (n,) Gradient, defines the linear term. s : ndarray, shape (k, n) or (n,) Array containing steps as rows. diag : ndarray, shape (n,), optional Addition diagonal part, affects the quadratic term. If None, assumed to be 0. Returns ------- values : ndarray with shape (k,) or float Values of the function. If `s` was 2-D, then ndarray is returned, otherwise, float is returned. """ if s.ndim == 1: Js = J.dot(s) q = np.dot(Js, Js) if diag is not None: q += np.dot(s * diag, s) else: Js = J.dot(s.T) q = np.sum(Js**2, axis=0) if diag is not None: q += np.sum(diag * s**2, axis=1) l = np.dot(s, g) return 0.5 * q + l # Utility functions to work with bound constraints. def in_bounds(x, lb, ub): """Check if a point lies within bounds.""" return np.all((x >= lb) & (x <= ub)) def step_size_to_bound(x, s, lb, ub): """Compute a min_step size required to reach a bound. The function computes a positive scalar t, such that x + s * t is on the bound. Returns ------- step : float Computed step. Non-negative value. hits : ndarray of int with shape of x Each element indicates whether a corresponding variable reaches the bound: * 0 - the bound was not hit. * -1 - the lower bound was hit. * 1 - the upper bound was hit. """ non_zero = np.nonzero(s) s_non_zero = s[non_zero] steps = np.empty_like(x) steps.fill(np.inf) with np.errstate(over='ignore'): steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero, (ub - x)[non_zero] / s_non_zero) min_step = np.min(steps) return min_step, np.equal(steps, min_step) * np.sign(s).astype(int) def find_active_constraints(x, lb, ub, rtol=1e-10): """Determine which constraints are active in a given point. The threshold is computed using `rtol` and the absolute value of the closest bound. Returns ------- active : ndarray of int with shape of x Each component shows whether the corresponding constraint is active: * 0 - a constraint is not active. * -1 - a lower bound is active. * 1 - a upper bound is active. 
""" active = np.zeros_like(x, dtype=int) if rtol == 0: active[x <= lb] = -1 active[x >= ub] = 1 return active lower_dist = x - lb upper_dist = ub - x lower_threshold = rtol * np.maximum(1, np.abs(lb)) upper_threshold = rtol * np.maximum(1, np.abs(ub)) lower_active = (np.isfinite(lb) & (lower_dist <= np.minimum(upper_dist, lower_threshold))) active[lower_active] = -1 upper_active = (np.isfinite(ub) & (upper_dist <= np.minimum(lower_dist, upper_threshold))) active[upper_active] = 1 return active def make_strictly_feasible(x, lb, ub, rstep=1e-10): """Shift a point to the interior of a feasible region. Each element of the returned vector is at least at a relative distance `rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used. """ x_new = x.copy() active = find_active_constraints(x, lb, ub, rstep) lower_mask = np.equal(active, -1) upper_mask = np.equal(active, 1) if rstep == 0: x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask]) x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask]) else: x_new[lower_mask] = (lb[lower_mask] + rstep * np.maximum(1, np.abs(lb[lower_mask]))) x_new[upper_mask] = (ub[upper_mask] - rstep * np.maximum(1, np.abs(ub[upper_mask]))) tight_bounds = (x_new < lb) | (x_new > ub) x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds]) return x_new def CL_scaling_vector(x, g, lb, ub): """Compute Coleman-Li scaling vector and its derivatives. Components of a vector v are defined as follows:: | ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf | 1, otherwise According to this definition v[i] >= 0 for all i. It differs from the definition in paper [1]_ (eq. (2.2)), where the absolute value of v is used. Both definitions are equivalent down the line. Derivatives of v with respect to x take value 1, -1 or 0 depending on a case. Returns ------- v : ndarray with shape of x Scaling vector. dv : ndarray with shape of x Derivatives of v[i] with respect to x[i], diagonal elements of v's Jacobian. References ---------- .. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. """ v = np.ones_like(x) dv = np.zeros_like(x) mask = (g < 0) & np.isfinite(ub) v[mask] = ub[mask] - x[mask] dv[mask] = -1 mask = (g > 0) & np.isfinite(lb) v[mask] = x[mask] - lb[mask] dv[mask] = 1 return v, dv def reflective_transformation(y, lb, ub): """Compute reflective transformation and its gradient.""" if in_bounds(y, lb, ub): return y, np.ones_like(y) lb_finite = np.isfinite(lb) ub_finite = np.isfinite(ub) x = y.copy() g_negative = np.zeros_like(y, dtype=bool) mask = lb_finite & ~ub_finite x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask]) g_negative[mask] = y[mask] < lb[mask] mask = ~lb_finite & ub_finite x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask]) g_negative[mask] = y[mask] > ub[mask] mask = lb_finite & ub_finite d = ub - lb t = np.remainder(y[mask] - lb[mask], 2 * d[mask]) x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t) g_negative[mask] = t > d[mask] g = np.ones_like(y) g[g_negative] = -1 return x, g # Functions to display algorithm's progress. 
def print_header_nonlinear(): print("{:^15}{:^15}{:^15}{:^15}{:^15}{:^15}" .format("Iteration", "Total nfev", "Cost", "Cost reduction", "Step norm", "Optimality")) def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction, step_norm, optimality): if cost_reduction is None: cost_reduction = " " * 15 else: cost_reduction = f"{cost_reduction:^15.2e}" if step_norm is None: step_norm = " " * 15 else: step_norm = f"{step_norm:^15.2e}" print("{:^15}{:^15}{:^15.4e}{}{}{:^15.2e}" .format(iteration, nfev, cost, cost_reduction, step_norm, optimality)) def print_header_linear(): print("{:^15}{:^15}{:^15}{:^15}{:^15}" .format("Iteration", "Cost", "Cost reduction", "Step norm", "Optimality")) def print_iteration_linear(iteration, cost, cost_reduction, step_norm, optimality): if cost_reduction is None: cost_reduction = " " * 15 else: cost_reduction = f"{cost_reduction:^15.2e}" if step_norm is None: step_norm = " " * 15 else: step_norm = f"{step_norm:^15.2e}" print("{:^15}{:^15.4e}{}{}{:^15.2e}".format( iteration, cost, cost_reduction, step_norm, optimality)) # Simple helper functions. def compute_grad(J, f): """Compute gradient of the least-squares cost function.""" if isinstance(J, LinearOperator): return J.rmatvec(f) else: return J.T.dot(f) def compute_jac_scale(J, scale_inv_old=None): """Compute variables scale based on the Jacobian matrix.""" if issparse(J): scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5 else: scale_inv = np.sum(J**2, axis=0)**0.5 if scale_inv_old is None: scale_inv[scale_inv == 0] = 1 else: scale_inv = np.maximum(scale_inv, scale_inv_old) return 1 / scale_inv, scale_inv def left_multiplied_operator(J, d): """Return diag(d) J as LinearOperator.""" J = aslinearoperator(J) def matvec(x): return d * J.matvec(x) def matmat(X): return d[:, np.newaxis] * J.matmat(X) def rmatvec(x): return J.rmatvec(x.ravel() * d) return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec) def right_multiplied_operator(J, d): """Return J diag(d) as LinearOperator.""" J = aslinearoperator(J) def matvec(x): return J.matvec(np.ravel(x) * d) def matmat(X): return J.matmat(X * d[:, np.newaxis]) def rmatvec(x): return d * J.rmatvec(x) return LinearOperator(J.shape, matvec=matvec, matmat=matmat, rmatvec=rmatvec) def regularized_lsq_operator(J, diag): """Return a matrix arising in regularized least squares as LinearOperator. The matrix is [ J ] [ D ] where D is diagonal matrix with elements from `diag`. """ J = aslinearoperator(J) m, n = J.shape def matvec(x): return np.hstack((J.matvec(x), diag * x)) def rmatvec(x): x1 = x[:m] x2 = x[m:] return J.rmatvec(x1) + diag * x2 return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec) def right_multiply(J, d, copy=True): """Compute J diag(d). If `copy` is False, `J` is modified in place (unless being LinearOperator). """ if copy and not isinstance(J, LinearOperator): J = J.copy() if issparse(J): J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe. elif isinstance(J, LinearOperator): J = right_multiplied_operator(J, d) else: J *= d return J def left_multiply(J, d, copy=True): """Compute diag(d) J. If `copy` is False, `J` is modified in place (unless being LinearOperator). """ if copy and not isinstance(J, LinearOperator): J = J.copy() if issparse(J): J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe. 
elif isinstance(J, LinearOperator): J = left_multiplied_operator(J, d) else: J *= d[:, np.newaxis] return J def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol): """Check termination condition for nonlinear least squares.""" ftol_satisfied = dF < ftol * F and ratio > 0.25 xtol_satisfied = dx_norm < xtol * (xtol + x_norm) if ftol_satisfied and xtol_satisfied: return 4 elif ftol_satisfied: return 2 elif xtol_satisfied: return 3 else: return None def scale_for_robust_loss_function(J, f, rho): """Scale Jacobian and residuals for a robust loss function. Arrays are modified in place. """ J_scale = rho[1] + 2 * rho[2] * f**2 J_scale[J_scale < EPS] = EPS J_scale **= 0.5 f *= rho[1] / J_scale return left_multiply(J, J_scale, copy=False), f
20,548
26.957823
79
py
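The quadratic helpers in the common.py listing above (build_quadratic_1d, minimize_quadratic_1d, evaluate_quadratic) all reduce the least-squares model 0.5 * s.T (J.T J + diag) s + g.T s to a one-dimensional quadratic along a search direction. A minimal NumPy-only sketch of that reduction, written independently of the private scipy.optimize._lsq.common module; the problem data below are arbitrary illustration values:

import numpy as np

# Arbitrary small least-squares model: 0.5 * s.T @ (J.T @ J) @ s + g.T @ s
rng = np.random.default_rng(0)
J = rng.standard_normal((5, 3))
g = rng.standard_normal(3)
s = rng.standard_normal(3)              # search direction

# Along the line p(t) = t * s the model reduces to a*t**2 + b*t, with
v = J @ s
a = 0.5 * (v @ v)                       # quadratic coefficient, 0.5 * ||J s||**2
b = g @ s                               # linear coefficient

# Bounded 1-D minimization, mirroring minimize_quadratic_1d: check the
# interval ends and the unconstrained extremum -b / (2a) if it is interior.
lb, ub = -2.0, 2.0
candidates = [lb, ub]
if a != 0:
    extremum = -0.5 * b / a
    if lb < extremum < ub:
        candidates.append(extremum)
values = [a * t**2 + b * t for t in candidates]
print(candidates[int(np.argmin(values))], min(values))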
scipy
scipy-main/scipy/optimize/_lsq/__init__.py
"""This module contains least-squares algorithms.""" from .least_squares import least_squares from .lsq_linear import lsq_linear __all__ = ['least_squares', 'lsq_linear']
172
27.833333
52
py
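The package __init__ above only re-exports the two public entry points, which are normally reached through scipy.optimize. A short usage sketch of least_squares on made-up data (the exponential-decay model, starting point, and bounds are illustrative assumptions, not taken from the sources above):

import numpy as np
from scipy.optimize import least_squares

# Synthetic data for y = exp(-k * t); the true k used here is 1.3.
t = np.linspace(0, 3, 20)
y = np.exp(-1.3 * t)

def residuals(params):
    k = params[0]
    return np.exp(-k * t) - y

# 'trf' is the default method and supports the bound constraints shown.
res = least_squares(residuals, x0=[1.0], bounds=(0.0, 10.0))
print(res.x)       # should recover roughly [1.3]
print(res.cost)    # 0.5 * sum of squared residuals at the solution
print(res.status)  # termination reason code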
scipy
scipy-main/scipy/optimize/_lsq/trf_linear.py
"""The adaptation of Trust Region Reflective algorithm for a linear least-squares problem.""" import numpy as np from numpy.linalg import norm from scipy.linalg import qr, solve_triangular from scipy.sparse.linalg import lsmr from scipy.optimize import OptimizeResult from .givens_elimination import givens_elimination from .common import ( EPS, step_size_to_bound, find_active_constraints, in_bounds, make_strictly_feasible, build_quadratic_1d, evaluate_quadratic, minimize_quadratic_1d, CL_scaling_vector, reflective_transformation, print_header_linear, print_iteration_linear, compute_grad, regularized_lsq_operator, right_multiplied_operator) def regularized_lsq_with_qr(m, n, R, QTb, perm, diag, copy_R=True): """Solve regularized least squares using information from QR-decomposition. The initial problem is to solve the following system in a least-squares sense:: A x = b D x = 0 where D is diagonal matrix. The method is based on QR decomposition of the form A P = Q R, where P is a column permutation matrix, Q is an orthogonal matrix and R is an upper triangular matrix. Parameters ---------- m, n : int Initial shape of A. R : ndarray, shape (n, n) Upper triangular matrix from QR decomposition of A. QTb : ndarray, shape (n,) First n components of Q^T b. perm : ndarray, shape (n,) Array defining column permutation of A, such that ith column of P is perm[i]-th column of identity matrix. diag : ndarray, shape (n,) Array containing diagonal elements of D. Returns ------- x : ndarray, shape (n,) Found least-squares solution. """ if copy_R: R = R.copy() v = QTb.copy() givens_elimination(R, v, diag[perm]) abs_diag_R = np.abs(np.diag(R)) threshold = EPS * max(m, n) * np.max(abs_diag_R) nns, = np.nonzero(abs_diag_R > threshold) R = R[np.ix_(nns, nns)] v = v[nns] x = np.zeros(n) x[perm[nns]] = solve_triangular(R, v) return x def backtracking(A, g, x, p, theta, p_dot_g, lb, ub): """Find an appropriate step size using backtracking line search.""" alpha = 1 while True: x_new, _ = reflective_transformation(x + alpha * p, lb, ub) step = x_new - x cost_change = -evaluate_quadratic(A, g, step) if cost_change > -0.1 * alpha * p_dot_g: break alpha *= 0.5 active = find_active_constraints(x_new, lb, ub) if np.any(active != 0): x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub) x_new = make_strictly_feasible(x_new, lb, ub, rstep=0) step = x_new - x cost_change = -evaluate_quadratic(A, g, step) return x, step, cost_change def select_step(x, A_h, g_h, c_h, p, p_h, d, lb, ub, theta): """Select the best step according to Trust Region Reflective algorithm.""" if in_bounds(x + p, lb, ub): return p p_stride, hits = step_size_to_bound(x, p, lb, ub) r_h = np.copy(p_h) r_h[hits.astype(bool)] *= -1 r = d * r_h # Restrict step, such that it hits the bound. p *= p_stride p_h *= p_stride x_on_bound = x + p # Find the step size along reflected direction. r_stride_u, _ = step_size_to_bound(x_on_bound, r, lb, ub) # Stay interior. r_stride_l = (1 - theta) * r_stride_u r_stride_u *= theta if r_stride_u > 0: a, b, c = build_quadratic_1d(A_h, g_h, r_h, s0=p_h, diag=c_h) r_stride, r_value = minimize_quadratic_1d( a, b, r_stride_l, r_stride_u, c=c) r_h = p_h + r_h * r_stride r = d * r_h else: r_value = np.inf # Now correct p_h to make it strictly interior. 
p_h *= theta p *= theta p_value = evaluate_quadratic(A_h, g_h, p_h, diag=c_h) ag_h = -g_h ag = d * ag_h ag_stride_u, _ = step_size_to_bound(x, ag, lb, ub) ag_stride_u *= theta a, b = build_quadratic_1d(A_h, g_h, ag_h, diag=c_h) ag_stride, ag_value = minimize_quadratic_1d(a, b, 0, ag_stride_u) ag *= ag_stride if p_value < r_value and p_value < ag_value: return p elif r_value < p_value and r_value < ag_value: return r else: return ag def trf_linear(A, b, x_lsq, lb, ub, tol, lsq_solver, lsmr_tol, max_iter, verbose, *, lsmr_maxiter=None): m, n = A.shape x, _ = reflective_transformation(x_lsq, lb, ub) x = make_strictly_feasible(x, lb, ub, rstep=0.1) if lsq_solver == 'exact': QT, R, perm = qr(A, mode='economic', pivoting=True) QT = QT.T if m < n: R = np.vstack((R, np.zeros((n - m, n)))) QTr = np.zeros(n) k = min(m, n) elif lsq_solver == 'lsmr': r_aug = np.zeros(m + n) auto_lsmr_tol = False if lsmr_tol is None: lsmr_tol = 1e-2 * tol elif lsmr_tol == 'auto': auto_lsmr_tol = True r = A.dot(x) - b g = compute_grad(A, r) cost = 0.5 * np.dot(r, r) initial_cost = cost termination_status = None step_norm = None cost_change = None if max_iter is None: max_iter = 100 if verbose == 2: print_header_linear() for iteration in range(max_iter): v, dv = CL_scaling_vector(x, g, lb, ub) g_scaled = g * v g_norm = norm(g_scaled, ord=np.inf) if g_norm < tol: termination_status = 1 if verbose == 2: print_iteration_linear(iteration, cost, cost_change, step_norm, g_norm) if termination_status is not None: break diag_h = g * dv diag_root_h = diag_h ** 0.5 d = v ** 0.5 g_h = d * g A_h = right_multiplied_operator(A, d) if lsq_solver == 'exact': QTr[:k] = QT.dot(r) p_h = -regularized_lsq_with_qr(m, n, R * d[perm], QTr, perm, diag_root_h, copy_R=False) elif lsq_solver == 'lsmr': lsmr_op = regularized_lsq_operator(A_h, diag_root_h) r_aug[:m] = r if auto_lsmr_tol: eta = 1e-2 * min(0.5, g_norm) lsmr_tol = max(EPS, min(0.1, eta * g_norm)) p_h = -lsmr(lsmr_op, r_aug, maxiter=lsmr_maxiter, atol=lsmr_tol, btol=lsmr_tol)[0] p = d * p_h p_dot_g = np.dot(p, g) if p_dot_g > 0: termination_status = -1 theta = 1 - min(0.005, g_norm) step = select_step(x, A_h, g_h, diag_h, p, p_h, d, lb, ub, theta) cost_change = -evaluate_quadratic(A, g, step) # Perhaps almost never executed, the idea is that `p` is descent # direction thus we must find acceptable cost decrease using simple # "backtracking", otherwise the algorithm's logic would break. if cost_change < 0: x, step, cost_change = backtracking( A, g, x, p, theta, p_dot_g, lb, ub) else: x = make_strictly_feasible(x + step, lb, ub, rstep=0) step_norm = norm(step) r = A.dot(x) - b g = compute_grad(A, r) if cost_change < tol * cost: termination_status = 2 cost = 0.5 * np.dot(r, r) if termination_status is None: termination_status = 0 active_mask = find_active_constraints(x, lb, ub, rtol=tol) return OptimizeResult( x=x, fun=r, cost=cost, optimality=g_norm, active_mask=active_mask, nit=iteration + 1, status=termination_status, initial_cost=initial_cost)
7,642
29.572
79
py
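trf_linear above is the solver that lsq_linear dispatches to for method='trf'; it is not meant to be called directly. A minimal sketch through the public API, using an arbitrary random system and box constraints (all values are illustrative):

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(1)
A = rng.standard_normal((20, 5))
b = rng.standard_normal(20)

# method='trf' is the default and runs the trust-region reflective
# routine listed above; bounds may be scalars or per-variable arrays.
res = lsq_linear(A, b, bounds=(0.0, 1.0), method='trf', tol=1e-10)
print(res.x)            # solution, each component within [0, 1]
print(res.optimality)   # first-order optimality measure
print(res.active_mask)  # which bounds are active at the solution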
scipy
scipy-main/scipy/signal/_filter_design.py
"""Filter design.""" import math import operator import warnings import numpy import numpy as np from numpy import (atleast_1d, poly, polyval, roots, real, asarray, resize, pi, absolute, sqrt, tan, log10, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate, zeros, sinh, append, concatenate, prod, ones, full, array, mintypecode) from numpy.polynomial.polynomial import polyval as npp_polyval from numpy.polynomial.polynomial import polyvalfromroots from scipy import special, optimize, fft as sp_fft from scipy.special import comb from scipy._lib._util import float_factorial __all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', 'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk', 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk', 'gammatone', 'iircomb'] class BadCoefficients(UserWarning): """Warning about badly conditioned filter coefficients""" pass abs = absolute def _is_int_type(x): """ Check if input is of a scalar integer type (so ``5`` and ``array(5)`` will pass, while ``5.0`` and ``array([5])`` will fail. """ if np.ndim(x) != 0: # Older versions of NumPy did not raise for np.array([1]).__index__() # This is safe to remove when support for those versions is dropped return False try: operator.index(x) except TypeError: return False else: return True def findfreqs(num, den, N, kind='ba'): """ Find array of frequencies for computing the response of an analog filter. Parameters ---------- num, den : array_like, 1-D The polynomial coefficients of the numerator and denominator of the transfer function of the filter or LTI system, where the coefficients are ordered from highest to lowest degree. Or, the roots of the transfer function numerator and denominator (i.e., zeroes and poles). N : int The length of the array to be computed. kind : str {'ba', 'zp'}, optional Specifies whether the numerator and denominator are specified by their polynomial coefficients ('ba'), or their roots ('zp'). Returns ------- w : (N,) ndarray A 1-D array of frequencies, logarithmically spaced. Examples -------- Find a set of nine frequencies that span the "interesting part" of the frequency response for the filter with the transfer function H(s) = s / (s^2 + 8s + 25) >>> from scipy import signal >>> signal.findfreqs([1, 0], [1, 8, 25], N=9) array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01, 3.16227766e-01, 1.00000000e+00, 3.16227766e+00, 1.00000000e+01, 3.16227766e+01, 1.00000000e+02]) """ if kind == 'ba': ep = atleast_1d(roots(den)) + 0j tz = atleast_1d(roots(num)) + 0j elif kind == 'zp': ep = atleast_1d(den) + 0j tz = atleast_1d(num) + 0j else: raise ValueError("input must be one of {'ba', 'zp'}") if len(ep) == 0: ep = atleast_1d(-1000) + 0j ez = np.r_[ep[ep.imag >= 0], tz[(np.abs(tz) < 1e5) & (tz.imag >= 0)]] integ = np.abs(ez) < 1e-10 hfreq = np.round(np.log10(np.max(3 * np.abs(ez.real + integ) + 1.5 * ez.imag)) + 0.5) lfreq = np.round(np.log10(0.1 * np.min(np.abs((ez + integ).real) + 2 * ez.imag)) - 0.5) w = np.logspace(lfreq, hfreq, N) return w def freqs(b, a, worN=200, plot=None): """ Compute frequency response of analog filter. 
Given the M-order numerator `b` and N-order denominator `a` of an analog filter, compute its frequency response:: b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M] H(w) = ---------------------------------------------- a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N] Parameters ---------- b : array_like Numerator of a linear filter. a : array_like Denominator of a linear filter. worN : {None, int, array_like}, optional If None, then compute at 200 frequencies around the interesting parts of the response curve (determined by pole-zero locations). If a single integer, then compute at that many frequencies. Otherwise, compute the response at the angular frequencies (e.g., rad/s) given in `worN`. plot : callable, optional A callable that takes two arguments. If given, the return parameters `w` and `h` are passed to plot. Useful for plotting the frequency response inside `freqs`. Returns ------- w : ndarray The angular frequencies at which `h` was computed. h : ndarray The frequency response. See Also -------- freqz : Compute the frequency response of a digital filter. Notes ----- Using Matplotlib's "plot" function as the callable for `plot` produces unexpected results, this plots the real part of the complex transfer function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``. Examples -------- >>> from scipy.signal import freqs, iirfilter >>> import numpy as np >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1') >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000)) >>> import matplotlib.pyplot as plt >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.xlabel('Frequency') >>> plt.ylabel('Amplitude response [dB]') >>> plt.grid(True) >>> plt.show() """ if worN is None: # For backwards compatibility w = findfreqs(b, a, 200) elif _is_int_type(worN): w = findfreqs(b, a, worN) else: w = atleast_1d(worN) s = 1j * w h = polyval(b, s) / polyval(a, s) if plot is not None: plot(w, h) return w, h def freqs_zpk(z, p, k, worN=200): """ Compute frequency response of analog filter. Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its frequency response:: (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1]) H(w) = k * ---------------------------------------- (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]) Parameters ---------- z : array_like Zeroes of a linear filter p : array_like Poles of a linear filter k : scalar Gain of a linear filter worN : {None, int, array_like}, optional If None, then compute at 200 frequencies around the interesting parts of the response curve (determined by pole-zero locations). If a single integer, then compute at that many frequencies. Otherwise, compute the response at the angular frequencies (e.g., rad/s) given in `worN`. Returns ------- w : ndarray The angular frequencies at which `h` was computed. h : ndarray The frequency response. See Also -------- freqs : Compute the frequency response of an analog filter in TF form freqz : Compute the frequency response of a digital filter in TF form freqz_zpk : Compute the frequency response of a digital filter in ZPK form Notes ----- .. versionadded:: 0.19.0 Examples -------- >>> import numpy as np >>> from scipy.signal import freqs_zpk, iirfilter >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1', ... 
output='zpk') >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000)) >>> import matplotlib.pyplot as plt >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.xlabel('Frequency') >>> plt.ylabel('Amplitude response [dB]') >>> plt.grid(True) >>> plt.show() """ k = np.asarray(k) if k.size > 1: raise ValueError('k must be a single scalar gain') if worN is None: # For backwards compatibility w = findfreqs(z, p, 200, kind='zp') elif _is_int_type(worN): w = findfreqs(z, p, worN, kind='zp') else: w = worN w = atleast_1d(w) s = 1j * w num = polyvalfromroots(s, z) den = polyvalfromroots(s, p) h = k * num/den return w, h def freqz(b, a=1, worN=512, whole=False, plot=None, fs=2*pi, include_nyquist=False): """ Compute the frequency response of a digital filter. Given the M-order numerator `b` and N-order denominator `a` of a digital filter, compute its frequency response:: jw -jw -jwM jw B(e ) b[0] + b[1]e + ... + b[M]e H(e ) = ------ = ----------------------------------- jw -jw -jwN A(e ) a[0] + a[1]e + ... + a[N]e Parameters ---------- b : array_like Numerator of a linear filter. If `b` has dimension greater than 1, it is assumed that the coefficients are stored in the first dimension, and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies array must be compatible for broadcasting. a : array_like Denominator of a linear filter. If `b` has dimension greater than 1, it is assumed that the coefficients are stored in the first dimension, and ``b.shape[1:]``, ``a.shape[1:]``, and the shape of the frequencies array must be compatible for broadcasting. worN : {None, int, array_like}, optional If a single integer, then compute at that many frequencies (default is N=512). This is a convenient alternative to:: np.linspace(0, fs if whole else fs/2, N, endpoint=include_nyquist) Using a number that is fast for FFT computations can result in faster computations (see Notes). If an array_like, compute the response at the frequencies given. These are in the same units as `fs`. whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, fs/2 (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to fs. Ignored if worN is array_like. plot : callable A callable that takes two arguments. If given, the return parameters `w` and `h` are passed to plot. Useful for plotting the frequency response inside `freqz`. fs : float, optional The sampling frequency of the digital system. Defaults to 2*pi radians/sample (so w is from 0 to pi). .. versionadded:: 1.2.0 include_nyquist : bool, optional If `whole` is False and `worN` is an integer, setting `include_nyquist` to True will include the last frequency (Nyquist frequency) and is otherwise ignored. .. versionadded:: 1.5.0 Returns ------- w : ndarray The frequencies at which `h` was computed, in the same units as `fs`. By default, `w` is normalized to the range [0, pi) (radians/sample). h : ndarray The frequency response, as complex numbers. See Also -------- freqz_zpk sosfreqz Notes ----- Using Matplotlib's :func:`matplotlib.pyplot.plot` function as the callable for `plot` produces unexpected results, as this plots the real part of the complex transfer function, not the magnitude. Try ``lambda w, h: plot(w, np.abs(h))``. A direct computation via (R)FFT is used to compute the frequency response when the following conditions are met: 1. An integer value is given for `worN`. 2. `worN` is fast to compute via FFT (i.e., `next_fast_len(worN) <scipy.fft.next_fast_len>` equals `worN`). 3. 
The denominator coefficients are a single value (``a.shape[0] == 1``). 4. `worN` is at least as long as the numerator coefficients (``worN >= b.shape[0]``). 5. If ``b.ndim > 1``, then ``b.shape[-1] == 1``. For long FIR filters, the FFT approach can have lower error and be much faster than the equivalent direct polynomial calculation. Examples -------- >>> from scipy import signal >>> import numpy as np >>> b = signal.firwin(80, 0.5, window=('kaiser', 8)) >>> w, h = signal.freqz(b) >>> import matplotlib.pyplot as plt >>> fig, ax1 = plt.subplots() >>> ax1.set_title('Digital filter frequency response') >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') >>> ax1.set_ylabel('Amplitude [dB]', color='b') >>> ax1.set_xlabel('Frequency [rad/sample]') >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> ax2.plot(w, angles, 'g') >>> ax2.set_ylabel('Angle (radians)', color='g') >>> ax2.grid(True) >>> ax2.axis('tight') >>> plt.show() Broadcasting Examples Suppose we have two FIR filters whose coefficients are stored in the rows of an array with shape (2, 25). For this demonstration, we'll use random data: >>> rng = np.random.default_rng() >>> b = rng.random((2, 25)) To compute the frequency response for these two filters with one call to `freqz`, we must pass in ``b.T``, because `freqz` expects the first axis to hold the coefficients. We must then extend the shape with a trivial dimension of length 1 to allow broadcasting with the array of frequencies. That is, we pass in ``b.T[..., np.newaxis]``, which has shape (25, 2, 1): >>> w, h = signal.freqz(b.T[..., np.newaxis], worN=1024) >>> w.shape (1024,) >>> h.shape (2, 1024) Now, suppose we have two transfer functions, with the same numerator coefficients ``b = [0.5, 0.5]``. The coefficients for the two denominators are stored in the first dimension of the 2-D array `a`:: a = [ 1 1 ] [ -0.25, -0.5 ] >>> b = np.array([0.5, 0.5]) >>> a = np.array([[1, 1], [-0.25, -0.5]]) Only `a` is more than 1-D. To make it compatible for broadcasting with the frequencies, we extend it with a trivial dimension in the call to `freqz`: >>> w, h = signal.freqz(b, a[..., np.newaxis], worN=1024) >>> w.shape (1024,) >>> h.shape (2, 1024) """ b = atleast_1d(b) a = atleast_1d(a) if worN is None: # For backwards compatibility worN = 512 h = None if _is_int_type(worN): N = operator.index(worN) del worN if N < 0: raise ValueError(f'worN must be nonnegative, got {N}') lastpoint = 2 * pi if whole else pi # if include_nyquist is true and whole is false, w should # include end point w = np.linspace(0, lastpoint, N, endpoint=include_nyquist and not whole) if (a.size == 1 and N >= b.shape[0] and sp_fft.next_fast_len(N) == N and (b.ndim == 1 or (b.shape[-1] == 1))): # if N is fast, 2 * N will be fast, too, so no need to check n_fft = N if whole else N * 2 if np.isrealobj(b) and np.isrealobj(a): fft_func = sp_fft.rfft else: fft_func = sp_fft.fft h = fft_func(b, n=n_fft, axis=0)[:N] h /= a if fft_func is sp_fft.rfft and whole: # exclude DC and maybe Nyquist (no need to use axis_reverse # here because we can build reversal with the truncation) stop = -1 if n_fft % 2 == 1 else -2 h_flip = slice(stop, 0, -1) h = np.concatenate((h, h[h_flip].conj())) if b.ndim > 1: # Last axis of h has length 1, so drop it. h = h[..., 0] # Move the first axis of h to the end. 
h = np.moveaxis(h, 0, -1) else: w = atleast_1d(worN) del worN w = 2*pi*w/fs if h is None: # still need to compute using freqs w zm1 = exp(-1j * w) h = (npp_polyval(zm1, b, tensor=False) / npp_polyval(zm1, a, tensor=False)) w = w*fs/(2*pi) if plot is not None: plot(w, h) return w, h def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi): r""" Compute the frequency response of a digital filter in ZPK form. Given the Zeros, Poles and Gain of a digital filter, compute its frequency response: :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])` where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are the `poles`. Parameters ---------- z : array_like Zeroes of a linear filter p : array_like Poles of a linear filter k : scalar Gain of a linear filter worN : {None, int, array_like}, optional If a single integer, then compute at that many frequencies (default is N=512). If an array_like, compute the response at the frequencies given. These are in the same units as `fs`. whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, fs/2 (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to fs. Ignored if w is array_like. fs : float, optional The sampling frequency of the digital system. Defaults to 2*pi radians/sample (so w is from 0 to pi). .. versionadded:: 1.2.0 Returns ------- w : ndarray The frequencies at which `h` was computed, in the same units as `fs`. By default, `w` is normalized to the range [0, pi) (radians/sample). h : ndarray The frequency response, as complex numbers. See Also -------- freqs : Compute the frequency response of an analog filter in TF form freqs_zpk : Compute the frequency response of an analog filter in ZPK form freqz : Compute the frequency response of a digital filter in TF form Notes ----- .. versionadded:: 0.19.0 Examples -------- Design a 4th-order digital Butterworth filter with cut-off of 100 Hz in a system with sample rate of 1000 Hz, and plot the frequency response: >>> import numpy as np >>> from scipy import signal >>> z, p, k = signal.butter(4, 100, output='zpk', fs=1000) >>> w, h = signal.freqz_zpk(z, p, k, fs=1000) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax1 = fig.add_subplot(1, 1, 1) >>> ax1.set_title('Digital filter frequency response') >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') >>> ax1.set_ylabel('Amplitude [dB]', color='b') >>> ax1.set_xlabel('Frequency [Hz]') >>> ax1.grid(True) >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> ax2.plot(w, angles, 'g') >>> ax2.set_ylabel('Angle [radians]', color='g') >>> plt.axis('tight') >>> plt.show() """ z, p = map(atleast_1d, (z, p)) if whole: lastpoint = 2 * pi else: lastpoint = pi if worN is None: # For backwards compatibility w = numpy.linspace(0, lastpoint, 512, endpoint=False) elif _is_int_type(worN): w = numpy.linspace(0, lastpoint, worN, endpoint=False) else: w = atleast_1d(worN) w = 2*pi*w/fs zm1 = exp(1j * w) h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p) w = w*fs/(2*pi) return w, h def group_delay(system, w=512, whole=False, fs=2*pi): r"""Compute the group delay of a digital filter. The group delay measures by how many samples amplitude envelopes of various spectral components of a signal are delayed by a filter. It is formally defined as the derivative of continuous (unwrapped) phase:: d jw D(w) = - -- arg H(e) dw Parameters ---------- system : tuple of array_like (b, a) Numerator and denominator coefficients of a filter transfer function. 
w : {None, int, array_like}, optional If a single integer, then compute at that many frequencies (default is N=512). If an array_like, compute the delay at the frequencies given. These are in the same units as `fs`. whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, fs/2 (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to fs. Ignored if w is array_like. fs : float, optional The sampling frequency of the digital system. Defaults to 2*pi radians/sample (so w is from 0 to pi). .. versionadded:: 1.2.0 Returns ------- w : ndarray The frequencies at which group delay was computed, in the same units as `fs`. By default, `w` is normalized to the range [0, pi) (radians/sample). gd : ndarray The group delay. See Also -------- freqz : Frequency response of a digital filter Notes ----- The similar function in MATLAB is called `grpdelay`. If the transfer function :math:`H(z)` has zeros or poles on the unit circle, the group delay at corresponding frequencies is undefined. When such a case arises the warning is raised and the group delay is set to 0 at those frequencies. For the details of numerical computation of the group delay refer to [1]_. .. versionadded:: 0.16.0 References ---------- .. [1] Richard G. Lyons, "Understanding Digital Signal Processing, 3rd edition", p. 830. Examples -------- >>> from scipy import signal >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1') >>> w, gd = signal.group_delay((b, a)) >>> import matplotlib.pyplot as plt >>> plt.title('Digital filter group delay') >>> plt.plot(w, gd) >>> plt.ylabel('Group delay [samples]') >>> plt.xlabel('Frequency [rad/sample]') >>> plt.show() """ if w is None: # For backwards compatibility w = 512 if _is_int_type(w): if whole: w = np.linspace(0, 2 * pi, w, endpoint=False) else: w = np.linspace(0, pi, w, endpoint=False) else: w = np.atleast_1d(w) w = 2*pi*w/fs b, a = map(np.atleast_1d, system) c = np.convolve(b, a[::-1]) cr = c * np.arange(c.size) z = np.exp(-1j * w) num = np.polyval(cr[::-1], z) den = np.polyval(c[::-1], z) gd = np.real(num / den) - a.size + 1 singular = ~np.isfinite(gd) near_singular = np.absolute(den) < 10 * EPSILON if np.any(singular): gd[singular] = 0 warnings.warn( "The group delay is singular at frequencies [{}], setting to 0". format(", ".join(f"{ws:.3f}" for ws in w[singular])), stacklevel=2 ) elif np.any(near_singular): warnings.warn( "The filter's denominator is extremely small at frequencies [{}], \ around which a singularity may be present". format(", ".join(f"{ws:.3f}" for ws in w[near_singular])), stacklevel=2 ) w = w*fs/(2*pi) return w, gd def _validate_sos(sos): """Helper to validate a SOS input""" sos = np.atleast_2d(sos) if sos.ndim != 2: raise ValueError('sos array must be 2D') n_sections, m = sos.shape if m != 6: raise ValueError('sos array must be shape (n_sections, 6)') if not (sos[:, 3] == 1).all(): raise ValueError('sos[:, 3] should be all ones') return sos, n_sections def sosfreqz(sos, worN=512, whole=False, fs=2*pi): r""" Compute the frequency response of a digital filter in SOS format. Given `sos`, an array with shape (n, 6) of second order sections of a digital filter, compute the frequency response of the system function:: B0(z) B1(z) B{n-1}(z) H(z) = ----- * ----- * ... * --------- A0(z) A1(z) A{n-1}(z) for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and denominator of the transfer function of the k-th second order section. 
Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. Each row corresponds to a second-order section, with the first three columns providing the numerator coefficients and the last three providing the denominator coefficients. worN : {None, int, array_like}, optional If a single integer, then compute at that many frequencies (default is N=512). Using a number that is fast for FFT computations can result in faster computations (see Notes of `freqz`). If an array_like, compute the response at the frequencies given (must be 1-D). These are in the same units as `fs`. whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, fs/2 (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to fs. fs : float, optional The sampling frequency of the digital system. Defaults to 2*pi radians/sample (so w is from 0 to pi). .. versionadded:: 1.2.0 Returns ------- w : ndarray The frequencies at which `h` was computed, in the same units as `fs`. By default, `w` is normalized to the range [0, pi) (radians/sample). h : ndarray The frequency response, as complex numbers. See Also -------- freqz, sosfilt Notes ----- .. versionadded:: 0.19.0 Examples -------- Design a 15th-order bandpass filter in SOS format. >>> from scipy import signal >>> import numpy as np >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', ... output='sos') Compute the frequency response at 1500 points from DC to Nyquist. >>> w, h = signal.sosfreqz(sos, worN=1500) Plot the response. >>> import matplotlib.pyplot as plt >>> plt.subplot(2, 1, 1) >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5)) >>> plt.plot(w/np.pi, db) >>> plt.ylim(-75, 5) >>> plt.grid(True) >>> plt.yticks([0, -20, -40, -60]) >>> plt.ylabel('Gain [dB]') >>> plt.title('Frequency Response') >>> plt.subplot(2, 1, 2) >>> plt.plot(w/np.pi, np.angle(h)) >>> plt.grid(True) >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], ... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) >>> plt.ylabel('Phase [rad]') >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') >>> plt.show() If the same filter is implemented as a single transfer function, numerical error corrupts the frequency response: >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', ... output='ba') >>> w, h = signal.freqz(b, a, worN=1500) >>> plt.subplot(2, 1, 1) >>> db = 20*np.log10(np.maximum(np.abs(h), 1e-5)) >>> plt.plot(w/np.pi, db) >>> plt.ylim(-75, 5) >>> plt.grid(True) >>> plt.yticks([0, -20, -40, -60]) >>> plt.ylabel('Gain [dB]') >>> plt.title('Frequency Response') >>> plt.subplot(2, 1, 2) >>> plt.plot(w/np.pi, np.angle(h)) >>> plt.grid(True) >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], ... [r'$-\pi$', r'$-\pi/2$', '0', r'$\pi/2$', r'$\pi$']) >>> plt.ylabel('Phase [rad]') >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') >>> plt.show() """ sos, n_sections = _validate_sos(sos) if n_sections == 0: raise ValueError('Cannot compute frequencies with no sections') h = 1. for row in sos: w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole, fs=fs) h *= rowh return w, h def _cplxreal(z, tol=None): """ Split into complex and real parts, combining conjugate pairs. The 1-D input vector `z` is split up into its complex (`zc`) and real (`zr`) elements. Every complex element must be part of a complex-conjugate pair, which are combined into a single number (with positive imaginary part) in the output. 
Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. Parameters ---------- z : array_like Vector of complex numbers to be sorted and split tol : float, optional Relative tolerance for testing realness and conjugate equality. Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for float64) Returns ------- zc : ndarray Complex elements of `z`, with each pair represented by a single value having positive imaginary part, sorted first by real part, and then by magnitude of imaginary part. The pairs are averaged when combined to reduce error. zr : ndarray Real elements of `z` (those having imaginary part less than `tol` times their magnitude), sorted by value. Raises ------ ValueError If there are any complex numbers in `z` for which a conjugate cannot be found. See Also -------- _cplxpair Examples -------- >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> zc, zr = _cplxreal(a) >>> print(zc) [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j] >>> print(zr) [ 1. 3. 4.] """ z = atleast_1d(z) if z.size == 0: return z, z elif z.ndim != 1: raise ValueError('_cplxreal only accepts 1-D input') if tol is None: # Get tolerance from dtype of input tol = 100 * np.finfo((1.0 * z).dtype).eps # Sort by real part, magnitude of imaginary part (speed up further sorting) z = z[np.lexsort((abs(z.imag), z.real))] # Split reals from conjugate pairs real_indices = abs(z.imag) <= tol * abs(z) zr = z[real_indices].real if len(zr) == len(z): # Input is entirely real return array([]), zr # Split positive and negative halves of conjugates z = z[~real_indices] zp = z[z.imag > 0] zn = z[z.imag < 0] if len(zp) != len(zn): raise ValueError('Array contains complex value with no matching ' 'conjugate.') # Find runs of (approximately) the same real part same_real = np.diff(zp.real) <= tol * abs(zp[:-1]) diffs = numpy.diff(concatenate(([0], same_real, [0]))) run_starts = numpy.nonzero(diffs > 0)[0] run_stops = numpy.nonzero(diffs < 0)[0] # Sort each run by their imaginary parts for i in range(len(run_starts)): start = run_starts[i] stop = run_stops[i] + 1 for chunk in (zp[start:stop], zn[start:stop]): chunk[...] = chunk[np.lexsort([abs(chunk.imag)])] # Check that negatives match positives if any(abs(zp - zn.conj()) > tol * abs(zn)): raise ValueError('Array contains complex value with no matching ' 'conjugate.') # Average out numerical inaccuracy in real vs imag parts of pairs zc = (zp + zn.conj()) / 2 return zc, zr def _cplxpair(z, tol=None): """ Sort into pairs of complex conjugates. Complex conjugates in `z` are sorted by increasing real part. In each pair, the number with negative imaginary part appears first. If pairs have identical real parts, they are sorted by increasing imaginary magnitude. Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. The pairs are forced to be exact complex conjugates by averaging the positive and negative values. Purely real numbers are also sorted, but placed after the complex conjugate pairs. A number is considered real if its imaginary part is smaller than `tol` times the magnitude of the number. Parameters ---------- z : array_like 1-D input array to be sorted. tol : float, optional Relative tolerance for testing realness and conjugate equality. Default is ``100 * spacing(1)`` of `z`'s data type (i.e., 2e-14 for float64) Returns ------- y : ndarray Complex conjugate pairs followed by real numbers. 
Raises ------ ValueError If there are any complex numbers in `z` for which a conjugate cannot be found. See Also -------- _cplxreal Examples -------- >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> z = _cplxpair(a) >>> print(z) [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j 3.+0.j 4.+0.j] """ z = atleast_1d(z) if z.size == 0 or np.isrealobj(z): return np.sort(z) if z.ndim != 1: raise ValueError('z must be 1-D') zc, zr = _cplxreal(z, tol) # Interleave complex values and their conjugates, with negative imaginary # parts first in each pair zc = np.dstack((zc.conj(), zc)).flatten() z = np.append(zc, zr) return z def tf2zpk(b, a): r"""Return zero, pole, gain (z, p, k) representation from a numerator, denominator representation of a linear filter. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. Returns ------- z : ndarray Zeros of the transfer function. p : ndarray Poles of the transfer function. k : float System gain. Notes ----- If some values of `b` are too close to 0, they are removed. In that case, a BadCoefficients warning is emitted. The `b` and `a` arrays are interpreted as coefficients for positive, descending powers of the transfer function variable. So the inputs :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]` can represent an analog filter of the form: .. math:: H(s) = \frac {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M} {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N} or a discrete-time filter of the form: .. math:: H(z) = \frac {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M} {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N} This "positive powers" form is found more commonly in controls engineering. If `M` and `N` are equal (which is true for all filters generated by the bilinear transform), then this happens to be equivalent to the "negative powers" discrete-time form preferred in DSP: .. math:: H(z) = \frac {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}} {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}} Although this is true for common filters, remember that this is not true in the general case. If `M` and `N` are not equal, the discrete-time transfer function coefficients must first be converted to the "positive powers" form before finding the poles and zeros. """ b, a = normalize(b, a) b = (b + 0.0) / a[0] a = (a + 0.0) / a[0] k = b[0] b /= b[0] z = roots(b) p = roots(a) return z, p, k def zpk2tf(z, p, k): """ Return polynomial transfer function representation from zeros and poles Parameters ---------- z : array_like Zeros of the transfer function. p : array_like Poles of the transfer function. k : float System gain. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. """ z = atleast_1d(z) k = atleast_1d(k) if len(z.shape) > 1: temp = poly(z[0]) b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char) if len(k) == 1: k = [k[0]] * z.shape[0] for i in range(z.shape[0]): b[i] = k[i] * poly(z[i]) else: b = k * poly(z) a = atleast_1d(poly(p)) # Use real output if possible. Copied from numpy.poly, since # we can't depend on a specific version of numpy. if issubclass(b.dtype.type, numpy.complexfloating): # if complex roots are all complex conjugates, the roots are real. 
roots = numpy.asarray(z, complex) pos_roots = numpy.compress(roots.imag > 0, roots) neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots)) if len(pos_roots) == len(neg_roots): if numpy.all(numpy.sort_complex(neg_roots) == numpy.sort_complex(pos_roots)): b = b.real.copy() if issubclass(a.dtype.type, numpy.complexfloating): # if complex roots are all complex conjugates, the roots are real. roots = numpy.asarray(p, complex) pos_roots = numpy.compress(roots.imag > 0, roots) neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots)) if len(pos_roots) == len(neg_roots): if numpy.all(numpy.sort_complex(neg_roots) == numpy.sort_complex(pos_roots)): a = a.real.copy() return b, a def tf2sos(b, a, pairing=None, *, analog=False): """ Return second-order sections from transfer function representation Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional The method to use to combine pairs of poles and zeros into sections. See `zpk2sos` for information and restrictions on `pairing` and `analog` arguments. analog : bool, optional If True, system is analog, otherwise discrete. .. versionadded:: 1.8.0 Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- zpk2sos, sosfilt Notes ----- It is generally discouraged to convert from TF to SOS format, since doing so usually will not improve numerical precision errors. Instead, consider designing filters in ZPK format and converting directly to SOS. TF is converted to SOS by first converting to ZPK format, then converting ZPK to SOS. .. versionadded:: 0.16.0 """ return zpk2sos(*tf2zpk(b, a), pairing=pairing, analog=analog) def sos2tf(sos): """ Return a single transfer function from a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. Notes ----- .. versionadded:: 0.16.0 """ sos = np.asarray(sos) result_type = sos.dtype if result_type.kind in 'bui': result_type = np.float64 b = np.array([1], dtype=result_type) a = np.array([1], dtype=result_type) n_sections = sos.shape[0] for section in range(n_sections): b = np.polymul(b, sos[section, :3]) a = np.polymul(a, sos[section, 3:]) return b, a def sos2zpk(sos): """ Return zeros, poles, and gain of a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. Returns ------- z : ndarray Zeros of the transfer function. p : ndarray Poles of the transfer function. k : float System gain. Notes ----- The number of zeros and poles returned will be ``n_sections * 2`` even if some of these are (effectively) zero. .. versionadded:: 0.16.0 """ sos = np.asarray(sos) n_sections = sos.shape[0] z = np.zeros(n_sections*2, np.complex128) p = np.zeros(n_sections*2, np.complex128) k = 1. 
for section in range(n_sections): zpk = tf2zpk(sos[section, :3], sos[section, 3:]) z[2*section:2*section+len(zpk[0])] = zpk[0] p[2*section:2*section+len(zpk[1])] = zpk[1] k *= zpk[2] return z, p, k def _nearest_real_complex_idx(fro, to, which): """Get the next closest real or complex element based on distance""" assert which in ('real', 'complex', 'any') order = np.argsort(np.abs(fro - to)) if which == 'any': return order[0] else: mask = np.isreal(fro[order]) if which == 'complex': mask = ~mask return order[np.nonzero(mask)[0][0]] def _single_zpksos(z, p, k): """Create one second-order section from up to two zeros and poles""" sos = np.zeros(6) b, a = zpk2tf(z, p, k) sos[3-len(b):3] = b sos[6-len(a):6] = a return sos def zpk2sos(z, p, k, pairing=None, *, analog=False): """Return second-order sections from zeros, poles, and gain of a system Parameters ---------- z : array_like Zeros of the transfer function. p : array_like Poles of the transfer function. k : float System gain. pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional The method to use to combine pairs of poles and zeros into sections. If analog is False and pairing is None, pairing is set to 'nearest'; if analog is True, pairing must be 'minimal', and is set to that if it is None. analog : bool, optional If True, system is analog, otherwise discrete. .. versionadded:: 1.8.0 Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- sosfilt Notes ----- The algorithm used to convert ZPK to SOS format is designed to minimize errors due to numerical precision issues. The pairing algorithm attempts to minimize the peak gain of each biquadratic section. This is done by pairing poles with the nearest zeros, starting with the poles closest to the unit circle for discrete-time systems, and poles closest to the imaginary axis for continuous-time systems. ``pairing='minimal'`` outputs may not be suitable for `sosfilt`, and ``analog=True`` outputs will never be suitable for `sosfilt`. *Algorithms* The steps in the ``pairing='nearest'``, ``pairing='keep_odd'``, and ``pairing='minimal'`` algorithms are mostly shared. The ``'nearest'`` algorithm attempts to minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under the constraint that odd-order systems should retain one section as first order. ``'minimal'`` is similar to ``'keep_odd'``, but no additional poles or zeros are introduced The algorithm steps are as follows: As a pre-processing step for ``pairing='nearest'``, ``pairing='keep_odd'``, add poles or zeros to the origin as necessary to obtain the same number of poles and zeros for pairing. If ``pairing == 'nearest'`` and there are an odd number of poles, add an additional pole and a zero at the origin. The following steps are then iterated over until no more poles or zeros remain: 1. Take the (next remaining) pole (complex or real) closest to the unit circle (or imaginary axis, for ``analog=True``) to begin a new filter section. 2. If the pole is real and there are no other remaining real poles [#]_, add the closest real zero to the section and leave it as a first order section. Note that after this step we are guaranteed to be left with an even number of real poles, complex poles, real zeros, and complex zeros for subsequent pairing iterations. 3. Else: 1. If the pole is complex and the zero is the only remaining real zero*, then pair the pole with the *next* closest zero (guaranteed to be complex). 
This is necessary to ensure that there will be a real zero remaining to eventually create a first-order section (thus keeping the odd order). 2. Else pair the pole with the closest remaining zero (complex or real). 3. Proceed to complete the second-order section by adding another pole and zero to the current pole and zero in the section: 1. If the current pole and zero are both complex, add their conjugates. 2. Else if the pole is complex and the zero is real, add the conjugate pole and the next closest real zero. 3. Else if the pole is real and the zero is complex, add the conjugate zero and the real pole closest to those zeros. 4. Else (we must have a real pole and real zero) add the next real pole closest to the unit circle, and then add the real zero closest to that pole. .. [#] This conditional can only be met for specific odd-order inputs with the ``pairing = 'keep_odd'`` or ``'minimal'`` methods. .. versionadded:: 0.16.0 Examples -------- Design a 6th order low-pass elliptic digital filter for a system with a sampling rate of 8000 Hz that has a pass-band corner frequency of 1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and the attenuation in the stop-band should be at least 90 dB. In the following call to `ellip`, we could use ``output='sos'``, but for this example, we'll use ``output='zpk'``, and then convert to SOS format with `zpk2sos`: >>> from scipy import signal >>> import numpy as np >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk') Now convert to SOS format. >>> sos = signal.zpk2sos(z, p, k) The coefficients of the numerators of the sections: >>> sos[:, :3] array([[0.0014152 , 0.00248677, 0.0014152 ], [1. , 0.72976874, 1. ], [1. , 0.17607852, 1. ]]) The symmetry in the coefficients occurs because all the zeros are on the unit circle. The coefficients of the denominators of the sections: >>> sos[:, 3:] array([[ 1. , -1.32544025, 0.46989976], [ 1. , -1.26118294, 0.62625924], [ 1. , -1.2570723 , 0.8619958 ]]) The next example shows the effect of the `pairing` option. We have a system with three poles and three zeros, so the SOS array will have shape (2, 6). The means there is, in effect, an extra pole and an extra zero at the origin in the SOS representation. >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) With ``pairing='nearest'`` (the default), we obtain >>> signal.zpk2sos(z1, p1, 1) array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ], [ 1. , 1. , 0. , 1. , -1.6 , 0.65]]) The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles {0, 0.75}, and the second section has the zeros {-1, 0} and poles {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin have been assigned to different sections. With ``pairing='keep_odd'``, we obtain: >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd') array([[ 1. , 1. , 0. , 1. , -0.75, 0. ], [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) The extra pole and zero at the origin are in the same section. The first section is, in effect, a first-order section. With ``pairing='minimal'``, the first-order section doesn't have the extra pole and zero at the origin: >>> signal.zpk2sos(z1, p1, 1, pairing='minimal') array([[ 0. , 1. , 1. , 0. , 1. , -0.75], [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) """ # TODO in the near future: # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259). # 2. Make `decimate` use `sosfilt` instead of `lfilter`. # 3. Make sosfilt automatically simplify sections to first order # when possible. 
Note this might make `sosfiltfilt` a bit harder (ICs). # 4. Further optimizations of the section ordering / pole-zero pairing. # See the wiki for other potential issues. if pairing is None: pairing = 'minimal' if analog else 'nearest' valid_pairings = ['nearest', 'keep_odd', 'minimal'] if pairing not in valid_pairings: raise ValueError('pairing must be one of %s, not %s' % (valid_pairings, pairing)) if analog and pairing != 'minimal': raise ValueError('for analog zpk2sos conversion, ' 'pairing must be "minimal"') if len(z) == len(p) == 0: if not analog: return np.array([[k, 0., 0., 1., 0., 0.]]) else: return np.array([[0., 0., k, 0., 0., 1.]]) if pairing != 'minimal': # ensure we have the same number of poles and zeros, and make copies p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0)))) z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0)))) n_sections = (max(len(p), len(z)) + 1) // 2 if len(p) % 2 == 1 and pairing == 'nearest': p = np.concatenate((p, [0.])) z = np.concatenate((z, [0.])) assert len(p) == len(z) else: if len(p) < len(z): raise ValueError('for analog zpk2sos conversion, ' 'must have len(p)>=len(z)') n_sections = (len(p) + 1) // 2 # Ensure we have complex conjugate pairs # (note that _cplxreal only gives us one element of each complex pair): z = np.concatenate(_cplxreal(z)) p = np.concatenate(_cplxreal(p)) if not np.isreal(k): raise ValueError('k must be real') k = k.real if not analog: # digital: "worst" is the closest to the unit circle def idx_worst(p): return np.argmin(np.abs(1 - np.abs(p))) else: # analog: "worst" is the closest to the imaginary axis def idx_worst(p): return np.argmin(np.abs(np.real(p))) sos = np.zeros((n_sections, 6)) # Construct the system, reversing order so the "worst" are last for si in range(n_sections-1, -1, -1): # Select the next "worst" pole p1_idx = idx_worst(p) p1 = p[p1_idx] p = np.delete(p, p1_idx) # Pair that pole with a zero if np.isreal(p1) and np.isreal(p).sum() == 0: # Special case (1): last remaining real pole if pairing != 'minimal': z1_idx = _nearest_real_complex_idx(z, p1, 'real') z1 = z[z1_idx] z = np.delete(z, z1_idx) sos[si] = _single_zpksos([z1, 0], [p1, 0], 1) elif len(z) > 0: z1_idx = _nearest_real_complex_idx(z, p1, 'real') z1 = z[z1_idx] z = np.delete(z, z1_idx) sos[si] = _single_zpksos([z1], [p1], 1) else: sos[si] = _single_zpksos([], [p1], 1) elif (len(p) + 1 == len(z) and not np.isreal(p1) and np.isreal(p).sum() == 1 and np.isreal(z).sum() == 1): # Special case (2): there's one real pole and one real zero # left, and an equal number of poles and zeros to pair up. 
# We *must* pair with a complex zero z1_idx = _nearest_real_complex_idx(z, p1, 'complex') z1 = z[z1_idx] z = np.delete(z, z1_idx) sos[si] = _single_zpksos([z1, z1.conj()], [p1, p1.conj()], 1) else: if np.isreal(p1): prealidx = np.flatnonzero(np.isreal(p)) p2_idx = prealidx[idx_worst(p[prealidx])] p2 = p[p2_idx] p = np.delete(p, p2_idx) else: p2 = p1.conj() # find closest zero if len(z) > 0: z1_idx = _nearest_real_complex_idx(z, p1, 'any') z1 = z[z1_idx] z = np.delete(z, z1_idx) if not np.isreal(z1): sos[si] = _single_zpksos([z1, z1.conj()], [p1, p2], 1) else: if len(z) > 0: z2_idx = _nearest_real_complex_idx(z, p1, 'real') z2 = z[z2_idx] assert np.isreal(z2) z = np.delete(z, z2_idx) sos[si] = _single_zpksos([z1, z2], [p1, p2], 1) else: sos[si] = _single_zpksos([z1], [p1, p2], 1) else: # no more zeros sos[si] = _single_zpksos([], [p1, p2], 1) assert len(p) == len(z) == 0 # we've consumed all poles and zeros del p, z # put gain in first sos sos[0][:3] *= k return sos def _align_nums(nums): """Aligns the shapes of multiple numerators. Given an array of numerator coefficient arrays [[a_1, a_2,..., a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator arrays with zero's so that all numerators have the same length. Such alignment is necessary for functions like 'tf2ss', which needs the alignment when dealing with SIMO transfer functions. Parameters ---------- nums: array_like Numerator or list of numerators. Not necessarily with same length. Returns ------- nums: array The numerator. If `nums` input was a list of numerators then a 2-D array with padded zeros for shorter numerators is returned. Otherwise returns ``np.asarray(nums)``. """ try: # The statement can throw a ValueError if one # of the numerators is a single digit and another # is array-like e.g. if nums = [5, [1, 2, 3]] nums = asarray(nums) if not np.issubdtype(nums.dtype, np.number): raise ValueError("dtype of numerator is non-numeric") return nums except ValueError: nums = [np.atleast_1d(num) for num in nums] max_width = max(num.size for num in nums) # pre-allocate aligned_nums = np.zeros((len(nums), max_width)) # Create numerators with padded zeros for index, num in enumerate(nums): aligned_nums[index, -num.size:] = num return aligned_nums def normalize(b, a): """Normalize numerator/denominator of a continuous-time transfer function. If values of `b` are too close to 0, they are removed. In that case, a BadCoefficients warning is emitted. Parameters ---------- b: array_like Numerator of the transfer function. Can be a 2-D array to normalize multiple transfer functions. a: array_like Denominator of the transfer function. At most 1-D. Returns ------- num: array The numerator of the normalized transfer function. At least a 1-D array. A 2-D array if the input `num` is a 2-D array. den: 1-D array The denominator of the normalized transfer function. Notes ----- Coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- >>> from scipy.signal import normalize Normalize the coefficients of the transfer function ``(3*s^2 - 2*s + 5) / (2*s^2 + 3*s + 1)``: >>> b = [3, -2, 5] >>> a = [2, 3, 1] >>> normalize(b, a) (array([ 1.5, -1. , 2.5]), array([1. , 1.5, 0.5])) A warning is generated if, for example, the first coefficient of `b` is 0. In the following example, the result is as expected: >>> import warnings >>> with warnings.catch_warnings(record=True) as w: ... 
num, den = normalize([0, 3, 6], [2, -5, 4]) >>> num array([1.5, 3. ]) >>> den array([ 1. , -2.5, 2. ]) >>> print(w[0].message) Badly conditioned filter coefficients (numerator): the results may be meaningless """ num, den = b, a den = np.atleast_1d(den) num = np.atleast_2d(_align_nums(num)) if den.ndim != 1: raise ValueError("Denominator polynomial must be rank-1 array.") if num.ndim > 2: raise ValueError("Numerator polynomial must be rank-1 or" " rank-2 array.") if np.all(den == 0): raise ValueError("Denominator must have at least one nonzero element.") # Trim leading zeros in denominator, leave at least one. den = np.trim_zeros(den, 'f') # Normalize transfer function num, den = num / den[0], den / den[0] # Count numerator columns that are all zero leading_zeros = 0 for col in num.T: if np.allclose(col, 0, atol=1e-14): leading_zeros += 1 else: break # Trim leading zeros of numerator if leading_zeros > 0: warnings.warn("Badly conditioned filter coefficients (numerator): the " "results may be meaningless", BadCoefficients) # Make sure at least one column remains if leading_zeros == num.shape[1]: leading_zeros -= 1 num = num[:, leading_zeros:] # Squeeze first dimension if singular if num.shape[0] == 1: num = num[0, :] return num, den def lp2lp(b, a, wo=1.0): r""" Transform a lowpass filter prototype to a different frequency. Return an analog low-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. wo : float Desired cutoff, as angular frequency (e.g. rad/s). Defaults to no change. Returns ------- b : array_like Numerator polynomial coefficients of the transformed low-pass filter. a : array_like Denominator polynomial coefficients of the transformed low-pass filter. See Also -------- lp2hp, lp2bp, lp2bs, bilinear lp2lp_zpk Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s}{\omega_0} Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> lp = signal.lti([1.0], [1.0, 1.0]) >>> lp2 = signal.lti(*signal.lp2lp(lp.num, lp.den, 2)) >>> w, mag_lp, p_lp = lp.bode() >>> w, mag_lp2, p_lp2 = lp2.bode(w) >>> plt.plot(w, mag_lp, label='Lowpass') >>> plt.plot(w, mag_lp2, label='Transformed Lowpass') >>> plt.semilogx() >>> plt.grid(True) >>> plt.xlabel('Frequency [rad/s]') >>> plt.ylabel('Magnitude [dB]') >>> plt.legend() """ a, b = map(atleast_1d, (a, b)) try: wo = float(wo) except TypeError: wo = float(wo[0]) d = len(a) n = len(b) M = max((d, n)) pwo = pow(wo, numpy.arange(M - 1, -1, -1)) start1 = max((n - d, 0)) start2 = max((d - n, 0)) b = b * pwo[start1] / pwo[start2:] a = a * pwo[start1] / pwo[start1:] return normalize(b, a) def lp2hp(b, a, wo=1.0): r""" Transform a lowpass filter prototype to a highpass filter. Return an analog high-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. wo : float Desired cutoff, as angular frequency (e.g., rad/s). Defaults to no change. Returns ------- b : array_like Numerator polynomial coefficients of the transformed high-pass filter. a : array_like Denominator polynomial coefficients of the transformed high-pass filter.
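As a quick, illustrative check of the substitution described in the Notes below (a sketch: the unity-cutoff prototype ``1/(s+1)`` and the expected coefficients are choices made for this example, not part of the API):

>>> import numpy as np
>>> from scipy import signal
>>> b_hp, a_hp = signal.lp2hp([1.0], [1.0, 1.0])   # 1/(s+1) -> s/(s+1)
>>> bool(np.allclose(b_hp, [1.0, 0.0]) and np.allclose(a_hp, [1.0, 1.0]))
True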
See Also -------- lp2lp, lp2bp, lp2bs, bilinear lp2hp_zpk Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{\omega_0}{s} This maintains symmetry of the lowpass and highpass responses on a logarithmic scale. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> lp = signal.lti([1.0], [1.0, 1.0]) >>> hp = signal.lti(*signal.lp2hp(lp.num, lp.den)) >>> w, mag_lp, p_lp = lp.bode() >>> w, mag_hp, p_hp = hp.bode(w) >>> plt.plot(w, mag_lp, label='Lowpass') >>> plt.plot(w, mag_hp, label='Highpass') >>> plt.semilogx() >>> plt.grid(True) >>> plt.xlabel('Frequency [rad/s]') >>> plt.ylabel('Magnitude [dB]') >>> plt.legend() """ a, b = map(atleast_1d, (a, b)) try: wo = float(wo) except TypeError: wo = float(wo[0]) d = len(a) n = len(b) if wo != 1: pwo = pow(wo, numpy.arange(max((d, n)))) else: pwo = numpy.ones(max((d, n)), b.dtype.char) if d >= n: outa = a[::-1] * pwo outb = resize(b, (d,)) outb[n:] = 0.0 outb[:n] = b[::-1] * pwo[:n] else: outb = b[::-1] * pwo outa = resize(a, (n,)) outa[d:] = 0.0 outa[:d] = a[::-1] * pwo[:d] return normalize(outb, outa) def lp2bp(b, a, wo=1.0, bw=1.0): r""" Transform a lowpass filter prototype to a bandpass filter. Return an analog band-pass filter with center frequency `wo` and bandwidth `bw` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. wo : float Desired passband center, as angular frequency (e.g., rad/s). Defaults to no change. bw : float Desired passband width, as angular frequency (e.g., rad/s). Defaults to 1. Returns ------- b : array_like Numerator polynomial coefficients of the transformed band-pass filter. a : array_like Denominator polynomial coefficients of the transformed band-pass filter. See Also -------- lp2lp, lp2hp, lp2bs, bilinear lp2bp_zpk Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} This is the "wideband" transformation, producing a passband with geometric (log frequency) symmetry about `wo`. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> lp = signal.lti([1.0], [1.0, 1.0]) >>> bp = signal.lti(*signal.lp2bp(lp.num, lp.den)) >>> w, mag_lp, p_lp = lp.bode() >>> w, mag_bp, p_bp = bp.bode(w) >>> plt.plot(w, mag_lp, label='Lowpass') >>> plt.plot(w, mag_bp, label='Bandpass') >>> plt.semilogx() >>> plt.grid(True) >>> plt.xlabel('Frequency [rad/s]') >>> plt.ylabel('Magnitude [dB]') >>> plt.legend() """ a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = mintypecode((a, b)) ma = max([N, D]) Np = N + ma Dp = D + ma bprime = numpy.empty(Np + 1, artype) aprime = numpy.empty(Dp + 1, artype) wosq = wo * wo for j in range(Np + 1): val = 0.0 for i in range(0, N + 1): for k in range(0, i + 1): if ma - i + 2 * k == j: val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i bprime[Np - j] = val for j in range(Dp + 1): val = 0.0 for i in range(0, D + 1): for k in range(0, i + 1): if ma - i + 2 * k == j: val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i aprime[Dp - j] = val return normalize(bprime, aprime) def lp2bs(b, a, wo=1.0, bw=1.0): r""" Transform a lowpass filter prototype to a bandstop filter. 
Return an analog band-stop filter with center frequency `wo` and bandwidth `bw` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. wo : float Desired stopband center, as angular frequency (e.g., rad/s). Defaults to no change. bw : float Desired stopband width, as angular frequency (e.g., rad/s). Defaults to 1. Returns ------- b : array_like Numerator polynomial coefficients of the transformed band-stop filter. a : array_like Denominator polynomial coefficients of the transformed band-stop filter. See Also -------- lp2lp, lp2hp, lp2bp, bilinear lp2bs_zpk Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} This is the "wideband" transformation, producing a stopband with geometric (log frequency) symmetry about `wo`. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> lp = signal.lti([1.0], [1.0, 1.5]) >>> bs = signal.lti(*signal.lp2bs(lp.num, lp.den)) >>> w, mag_lp, p_lp = lp.bode() >>> w, mag_bs, p_bs = bs.bode(w) >>> plt.plot(w, mag_lp, label='Lowpass') >>> plt.plot(w, mag_bs, label='Bandstop') >>> plt.semilogx() >>> plt.grid(True) >>> plt.xlabel('Frequency [rad/s]') >>> plt.ylabel('Magnitude [dB]') >>> plt.legend() """ a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = mintypecode((a, b)) M = max([N, D]) Np = M + M Dp = M + M bprime = numpy.empty(Np + 1, artype) aprime = numpy.empty(Dp + 1, artype) wosq = wo * wo for j in range(Np + 1): val = 0.0 for i in range(0, N + 1): for k in range(0, M - i + 1): if i + 2 * k == j: val += (comb(M - i, k) * b[N - i] * (wosq) ** (M - i - k) * bw ** i) bprime[Np - j] = val for j in range(Dp + 1): val = 0.0 for i in range(0, D + 1): for k in range(0, M - i + 1): if i + 2 * k == j: val += (comb(M - i, k) * a[D - i] * (wosq) ** (M - i - k) * bw ** i) aprime[Dp - j] = val return normalize(bprime, aprime) def bilinear(b, a, fs=1.0): r""" Return a digital IIR filter from an analog one using a bilinear transform. Transform a set of poles and zeros from the analog s-plane to the digital z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for ``s``, maintaining the shape of the frequency response. Parameters ---------- b : array_like Numerator of the analog filter transfer function. a : array_like Denominator of the analog filter transfer function. fs : float Sample rate, as ordinary frequency (e.g., hertz). No prewarping is done in this function. Returns ------- b : ndarray Numerator of the transformed digital filter transfer function. a : ndarray Denominator of the transformed digital filter transfer function. See Also -------- lp2lp, lp2hp, lp2bp, lp2bs bilinear_zpk Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> fs = 100 >>> bf = 2 * np.pi * np.array([7, 13]) >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', ... analog=True)) >>> filtz = signal.lti(*signal.bilinear(filts.num, filts.den, fs)) >>> wz, hz = signal.freqz(filtz.num, filtz.den) >>> ws, hs = signal.freqs(filts.num, filts.den, worN=fs*wz) >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), ... label=r'$|H_z(e^{j \omega})|$') >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), ... 
label=r'$|H(j \omega)|$') >>> plt.legend() >>> plt.xlabel('Frequency [Hz]') >>> plt.ylabel('Magnitude [dB]') >>> plt.grid(True) """ fs = float(fs) a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = float M = max([N, D]) Np = M Dp = M bprime = numpy.empty(Np + 1, artype) aprime = numpy.empty(Dp + 1, artype) for j in range(Np + 1): val = 0.0 for i in range(N + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * b[N - i] * pow(2 * fs, i) * (-1) ** k) bprime[j] = real(val) for j in range(Dp + 1): val = 0.0 for i in range(D + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * a[D - i] * pow(2 * fs, i) * (-1) ** k) aprime[j] = real(val) return normalize(bprime, aprime) def _validate_gpass_gstop(gpass, gstop): if gpass <= 0.0: raise ValueError("gpass should be larger than 0.0") elif gstop <= 0.0: raise ValueError("gstop should be larger than 0.0") elif gpass > gstop: raise ValueError("gpass should be smaller than gstop") def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba', fs=None): """Complete IIR digital and analog filter design. Given passband and stopband frequencies and gains, construct an analog or digital IIR filter of minimum order for a given basic type. Return the output in numerator, denominator ('ba'), pole-zero ('zpk') or second order sections ('sos') form. Parameters ---------- wp, ws : float or array like, shape (2,) Passband and stopband edge frequencies. Possible values are scalars (for lowpass and highpass filters) or ranges (for bandpass and bandstop filters). For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). Note, that for bandpass and bandstop filters passband must lie strictly inside stopband or vice versa. gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' output : {'ba', 'zpk', 'sos'}, optional Filter form of the output: - second-order sections (recommended): 'sos' - numerator/denominator (default) : 'ba' - pole-zero : 'zpk' In general the second-order sections ('sos') form is recommended because inferring the coefficients for the numerator/denominator form ('ba') suffers from numerical instabilities. For reasons of backward compatibility the default form is the numerator/denominator form ('ba'), where the 'b' and the 'a' in 'ba' refer to the commonly used names of the coefficients used. Note: Using the second-order sections form ('sos') is sometimes associated with additional computational costs: for data-intense use cases it is therefore recommended to also investigate the numerator/denominator form ('ba'). fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. 
z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import matplotlib.ticker >>> wp = 0.2 >>> ws = 0.3 >>> gpass = 1 >>> gstop = 40 >>> system = signal.iirdesign(wp, ws, gpass, gstop) >>> w, h = signal.freqz(*system) >>> fig, ax1 = plt.subplots() >>> ax1.set_title('Digital filter frequency response') >>> ax1.plot(w, 20 * np.log10(abs(h)), 'b') >>> ax1.set_ylabel('Amplitude [dB]', color='b') >>> ax1.set_xlabel('Frequency [rad/sample]') >>> ax1.grid(True) >>> ax1.set_ylim([-120, 20]) >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> ax2.plot(w, angles, 'g') >>> ax2.set_ylabel('Angle (radians)', color='g') >>> ax2.grid(True) >>> ax2.axis('tight') >>> ax2.set_ylim([-6, 1]) >>> nticks = 8 >>> ax1.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) >>> ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks)) """ try: ordfunc = filter_dict[ftype][1] except KeyError as e: raise ValueError("Invalid IIR filter type: %s" % ftype) from e except IndexError as e: raise ValueError(("%s does not have order selection. Use " "iirfilter function.") % ftype) from e _validate_gpass_gstop(gpass, gstop) wp = atleast_1d(wp) ws = atleast_1d(ws) if wp.shape[0] != ws.shape[0] or wp.shape not in [(1,), (2,)]: raise ValueError("wp and ws must have one or two elements each, and" "the same shape, got %s and %s" % (wp.shape, ws.shape)) if any(wp <= 0) or any(ws <= 0): raise ValueError("Values for wp, ws must be greater than 0") if not analog: if fs is None: if any(wp >= 1) or any(ws >= 1): raise ValueError("Values for wp, ws must be less than 1") elif any(wp >= fs/2) or any(ws >= fs/2): raise ValueError("Values for wp, ws must be less than fs/2" " (fs={} -> fs/2={})".format(fs, fs/2)) if wp.shape[0] == 2: if not ((ws[0] < wp[0] and wp[1] < ws[1]) or (wp[0] < ws[0] and ws[1] < wp[1])): raise ValueError("Passband must lie strictly inside stopband" " or vice versa") band_type = 2 * (len(wp) - 1) band_type += 1 if wp[0] >= ws[0]: band_type += 1 btype = {1: 'lowpass', 2: 'highpass', 3: 'bandstop', 4: 'bandpass'}[band_type] N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog, fs=fs) return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output, fs=fs) def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, ftype='butter', output='ba', fs=None): """ IIR digital and analog filter design given order and critical points. Design an Nth-order digital or analog filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g., rad/s). 
When Wn is a length-2 sequence, ``Wn[0]`` must be less than ``Wn[1]``. rp : float, optional For Chebyshev and elliptic filters, provides the maximum ripple in the passband. (dB) rs : float, optional For Chebyshev and elliptic filters, provides the minimum attenuation in the stop band. (dB) btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional The type of filter. Default is 'bandpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Filter form of the output: - second-order sections (recommended): 'sos' - numerator/denominator (default) : 'ba' - pole-zero : 'zpk' In general the second-order sections ('sos') form is recommended because inferring the coefficients for the numerator/denominator form ('ba') suffers from numerical instabilities. For reasons of backward compatibility the default form is the numerator/denominator form ('ba'), where the 'b' and the 'a' in 'ba' refer to the commonly used names of the coefficients used. Note: Using the second-order sections form ('sos') is sometimes associated with additional computational costs: for data-intense use cases it is therefore recommended to also investigate the numerator/denominator form ('ba'). fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirdesign : General filter design using passband and stopband spec Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Generate a 17th-order Chebyshev II analog bandpass filter from 50 Hz to 200 Hz and plot the frequency response: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.iirfilter(17, [2*np.pi*50, 2*np.pi*200], rs=60, ... btype='band', analog=True, ftype='cheby2') >>> w, h = signal.freqs(b, a, 1000) >>> fig = plt.figure() >>> ax = fig.add_subplot(1, 1, 1) >>> ax.semilogx(w / (2*np.pi), 20 * np.log10(np.maximum(abs(h), 1e-5))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [Hz]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() Create a digital filter with the same properties, in a system with sampling rate of 2000 Hz, and plot the frequency response. (Second-order sections implementation is required to ensure stability of a filter of this order): >>> sos = signal.iirfilter(17, [50, 200], rs=60, btype='band', ... analog=False, ftype='cheby2', fs=2000, ... 
output='sos') >>> w, h = signal.sosfreqz(sos, 2000, fs=2000) >>> fig = plt.figure() >>> ax = fig.add_subplot(1, 1, 1) >>> ax.semilogx(w, 20 * np.log10(np.maximum(abs(h), 1e-5))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [Hz]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() """ ftype, btype, output = (x.lower() for x in (ftype, btype, output)) Wn = asarray(Wn) if fs is not None: if analog: raise ValueError("fs cannot be specified for an analog filter") Wn = 2*Wn/fs if numpy.any(Wn <= 0): raise ValueError("filter critical frequencies must be greater than 0") if Wn.size > 1 and not Wn[0] < Wn[1]: raise ValueError("Wn[0] must be less than Wn[1]") try: btype = band_dict[btype] except KeyError as e: raise ValueError("'%s' is an invalid bandtype for filter." % btype) from e try: typefunc = filter_dict[ftype][0] except KeyError as e: raise ValueError("'%s' is not a valid basic IIR filter." % ftype) from e if output not in ['ba', 'zpk', 'sos']: raise ValueError("'%s' is not a valid output form." % output) if rp is not None and rp < 0: raise ValueError("passband ripple (rp) must be positive") if rs is not None and rs < 0: raise ValueError("stopband attenuation (rs) must be positive") # Get analog lowpass prototype if typefunc == buttap: z, p, k = typefunc(N) elif typefunc == besselap: z, p, k = typefunc(N, norm=bessel_norms[ftype]) elif typefunc == cheb1ap: if rp is None: raise ValueError("passband ripple (rp) must be provided to " "design a Chebyshev I filter.") z, p, k = typefunc(N, rp) elif typefunc == cheb2ap: if rs is None: raise ValueError("stopband attenuation (rs) must be provided to " "design a Chebyshev II filter.") z, p, k = typefunc(N, rs) elif typefunc == ellipap: if rs is None or rp is None: raise ValueError("Both rp and rs must be provided to design an " "elliptic filter.") z, p, k = typefunc(N, rp, rs) else: raise NotImplementedError("'%s' not implemented in iirfilter." % ftype) # Pre-warp frequencies for digital filter design if not analog: if numpy.any(Wn <= 0) or numpy.any(Wn >= 1): if fs is not None: raise ValueError("Digital filter critical frequencies must " f"be 0 < Wn < fs/2 (fs={fs} -> fs/2={fs/2})") raise ValueError("Digital filter critical frequencies " "must be 0 < Wn < 1") fs = 2.0 warped = 2 * fs * tan(pi * Wn / fs) else: warped = Wn # transform to lowpass, bandpass, highpass, or bandstop if btype in ('lowpass', 'highpass'): if numpy.size(Wn) != 1: raise ValueError('Must specify a single critical frequency Wn ' 'for lowpass or highpass filter') if btype == 'lowpass': z, p, k = lp2lp_zpk(z, p, k, wo=warped) elif btype == 'highpass': z, p, k = lp2hp_zpk(z, p, k, wo=warped) elif btype in ('bandpass', 'bandstop'): try: bw = warped[1] - warped[0] wo = sqrt(warped[0] * warped[1]) except IndexError as e: raise ValueError('Wn must specify start and stop frequencies for ' 'bandpass or bandstop filter') from e if btype == 'bandpass': z, p, k = lp2bp_zpk(z, p, k, wo=wo, bw=bw) elif btype == 'bandstop': z, p, k = lp2bs_zpk(z, p, k, wo=wo, bw=bw) else: raise NotImplementedError("'%s' not implemented in iirfilter."
% btype) # Find discrete equivalent if necessary if not analog: z, p, k = bilinear_zpk(z, p, k, fs=fs) # Transform to proper out type (pole-zero, state-space, numer-denom) if output == 'zpk': return z, p, k elif output == 'ba': return zpk2tf(z, p, k) elif output == 'sos': return zpk2sos(z, p, k, analog=analog) def _relative_degree(z, p): """ Return relative degree of transfer function from zeros and poles """ degree = len(p) - len(z) if degree < 0: raise ValueError("Improper transfer function. " "Must have at least as many poles as zeros.") else: return degree def bilinear_zpk(z, p, k, fs): r""" Return a digital IIR filter from an analog one using a bilinear transform. Transform a set of poles and zeros from the analog s-plane to the digital z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for ``s``, maintaining the shape of the frequency response. Parameters ---------- z : array_like Zeros of the analog filter transfer function. p : array_like Poles of the analog filter transfer function. k : float System gain of the analog filter transfer function. fs : float Sample rate, as ordinary frequency (e.g., hertz). No prewarping is done in this function. Returns ------- z : ndarray Zeros of the transformed digital filter transfer function. p : ndarray Poles of the transformed digital filter transfer function. k : float System gain of the transformed digital filter. See Also -------- lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk bilinear Notes ----- .. versionadded:: 1.1.0 Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fs = 100 >>> bf = 2 * np.pi * np.array([7, 13]) >>> filts = signal.lti(*signal.butter(4, bf, btype='bandpass', analog=True, ... output='zpk')) >>> filtz = signal.lti(*signal.bilinear_zpk(filts.zeros, filts.poles, ... filts.gain, fs)) >>> wz, hz = signal.freqz_zpk(filtz.zeros, filtz.poles, filtz.gain) >>> ws, hs = signal.freqs_zpk(filts.zeros, filts.poles, filts.gain, ... worN=fs*wz) >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hz).clip(1e-15)), ... label=r'$|H_z(e^{j \omega})|$') >>> plt.semilogx(wz*fs/(2*np.pi), 20*np.log10(np.abs(hs).clip(1e-15)), ... label=r'$|H(j \omega)|$') >>> plt.legend() >>> plt.xlabel('Frequency [Hz]') >>> plt.ylabel('Magnitude [dB]') >>> plt.grid(True) """ z = atleast_1d(z) p = atleast_1d(p) degree = _relative_degree(z, p) fs2 = 2.0*fs # Bilinear transform the poles and zeros z_z = (fs2 + z) / (fs2 - z) p_z = (fs2 + p) / (fs2 - p) # Any zeros that were at infinity get moved to the Nyquist frequency z_z = append(z_z, -ones(degree)) # Compensate for gain change k_z = k * real(prod(fs2 - z) / prod(fs2 - p)) return z_z, p_z, k_z def lp2lp_zpk(z, p, k, wo=1.0): r""" Transform a lowpass filter prototype to a different frequency. Return an analog low-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog filter transfer function. p : array_like Poles of the analog filter transfer function. k : float System gain of the analog filter transfer function. wo : float Desired cutoff, as angular frequency (e.g., rad/s). Defaults to no change. Returns ------- z : ndarray Zeros of the transformed low-pass filter transfer function. p : ndarray Poles of the transformed low-pass filter transfer function. k : float System gain of the transformed low-pass filter. 
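As a small, illustrative sketch of the frequency scaling described in the Notes below (the prototype pole at -1 and the target cutoff of 10 rad/s are arbitrary choices for this example):

>>> import numpy as np
>>> from scipy import signal
>>> z2, p2, k2 = signal.lp2lp_zpk([], [-1.0], 1.0, wo=10.0)
>>> bool(np.allclose(p2, [-10.0]) and np.isclose(k2, 10.0))
True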
See Also -------- lp2hp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear lp2lp Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s}{\omega_0} .. versionadded:: 1.1.0 """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) # Avoid int wraparound degree = _relative_degree(z, p) # Scale all points radially from origin to shift cutoff frequency z_lp = wo * z p_lp = wo * p # Each shifted pole decreases gain by wo, each shifted zero increases it. # Cancel out the net change to keep overall gain the same k_lp = k * wo**degree return z_lp, p_lp, k_lp def lp2hp_zpk(z, p, k, wo=1.0): r""" Transform a lowpass filter prototype to a highpass filter. Return an analog high-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog filter transfer function. p : array_like Poles of the analog filter transfer function. k : float System gain of the analog filter transfer function. wo : float Desired cutoff, as angular frequency (e.g., rad/s). Defaults to no change. Returns ------- z : ndarray Zeros of the transformed high-pass filter transfer function. p : ndarray Poles of the transformed high-pass filter transfer function. k : float System gain of the transformed high-pass filter. See Also -------- lp2lp_zpk, lp2bp_zpk, lp2bs_zpk, bilinear lp2hp Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{\omega_0}{s} This maintains symmetry of the lowpass and highpass responses on a logarithmic scale. .. versionadded:: 1.1.0 """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) degree = _relative_degree(z, p) # Invert positions radially about unit circle to convert LPF to HPF # Scale all points radially from origin to shift cutoff frequency z_hp = wo / z p_hp = wo / p # If lowpass had zeros at infinity, inverting moves them to origin. z_hp = append(z_hp, zeros(degree)) # Cancel out gain change caused by inversion k_hp = k * real(prod(-z) / prod(-p)) return z_hp, p_hp, k_hp def lp2bp_zpk(z, p, k, wo=1.0, bw=1.0): r""" Transform a lowpass filter prototype to a bandpass filter. Return an analog band-pass filter with center frequency `wo` and bandwidth `bw` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog filter transfer function. p : array_like Poles of the analog filter transfer function. k : float System gain of the analog filter transfer function. wo : float Desired passband center, as angular frequency (e.g., rad/s). Defaults to no change. bw : float Desired passband width, as angular frequency (e.g., rad/s). Defaults to 1. Returns ------- z : ndarray Zeros of the transformed band-pass filter transfer function. p : ndarray Poles of the transformed band-pass filter transfer function. k : float System gain of the transformed band-pass filter. See Also -------- lp2lp_zpk, lp2hp_zpk, lp2bs_zpk, bilinear lp2bp Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} This is the "wideband" transformation, producing a passband with geometric (log frequency) symmetry about `wo`. .. 
versionadded:: 1.1.0 """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) bw = float(bw) degree = _relative_degree(z, p) # Scale poles and zeros to desired bandwidth z_lp = z * bw/2 p_lp = p * bw/2 # Square root needs to produce complex result, not NaN z_lp = z_lp.astype(complex) p_lp = p_lp.astype(complex) # Duplicate poles and zeros and shift from baseband to +wo and -wo z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2), z_lp - sqrt(z_lp**2 - wo**2))) p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2), p_lp - sqrt(p_lp**2 - wo**2))) # Move degree zeros to origin, leaving degree zeros at infinity for BPF z_bp = append(z_bp, zeros(degree)) # Cancel out gain change from frequency scaling k_bp = k * bw**degree return z_bp, p_bp, k_bp def lp2bs_zpk(z, p, k, wo=1.0, bw=1.0): r""" Transform a lowpass filter prototype to a bandstop filter. Return an analog band-stop filter with center frequency `wo` and stopband width `bw` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog filter transfer function. p : array_like Poles of the analog filter transfer function. k : float System gain of the analog filter transfer function. wo : float Desired stopband center, as angular frequency (e.g., rad/s). Defaults to no change. bw : float Desired stopband width, as angular frequency (e.g., rad/s). Defaults to 1. Returns ------- z : ndarray Zeros of the transformed band-stop filter transfer function. p : ndarray Poles of the transformed band-stop filter transfer function. k : float System gain of the transformed band-stop filter. See Also -------- lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, bilinear lp2bs Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} This is the "wideband" transformation, producing a stopband with geometric (log frequency) symmetry about `wo`. .. versionadded:: 1.1.0 """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) bw = float(bw) degree = _relative_degree(z, p) # Invert to a highpass filter with desired bandwidth z_hp = (bw/2) / z p_hp = (bw/2) / p # Square root needs to produce complex result, not NaN z_hp = z_hp.astype(complex) p_hp = p_hp.astype(complex) # Duplicate poles and zeros and shift from baseband to +wo and -wo z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2), z_hp - sqrt(z_hp**2 - wo**2))) p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2), p_hp - sqrt(p_hp**2 - wo**2))) # Move any zeros that were at infinity to the center of the stopband z_bs = append(z_bs, full(degree, +1j*wo)) z_bs = append(z_bs, full(degree, -1j*wo)) # Cancel out gain change caused by inversion k_bs = k * real(prod(-z) / prod(-p)) return z_bs, p_bs, k_bs def butter(N, Wn, btype='low', analog=False, output='ba', fs=None): """ Butterworth digital and analog filter design. Design an Nth-order digital or analog Butterworth filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. For 'bandpass' and 'bandstop' filters, the resulting order of the final second-order sections ('sos') matrix is ``2*N``, with `N` the number of biquad sections of the desired system. Wn : array_like The critical frequency or frequencies. For lowpass and highpass filters, Wn is a scalar; for bandpass and bandstop filters, Wn is a length-2 sequence. For a Butterworth filter, this is the point at which the gain drops to 1/sqrt(2) that of the passband (the "-3 dB point"). 
For digital filters, if `fs` is not specified, `Wn` units are normalized from 0 to 1, where 1 is the Nyquist frequency (`Wn` is thus in half cycles / sample and defined as 2*critical frequencies / `fs`). If `fs` is specified, `Wn` is in the same units as `fs`. For analog filters, `Wn` is an angular frequency (e.g. rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. See Also -------- buttord, buttap Notes ----- The Butterworth filter has maximally flat frequency response in the passband. The ``'sos'`` output parameter was added in 0.16.0. If the transfer function form ``[b, a]`` is requested, numerical problems can occur since the conversion between roots and the polynomial coefficients is a numerically sensitive operation, even for N >= 4. It is recommended to work with the SOS representation. .. warning:: Designing high-order and narrowband IIR filters in TF form can result in unstable or incorrect filtering due to floating point numerical precision issues. Consider inspecting output filter characteristics `freqz` or designing the filters with second-order sections via ``output='sos'``. Examples -------- Design an analog filter and plot its frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> b, a = signal.butter(4, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Butterworth filter frequency response') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.show() Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz >>> t = np.linspace(0, 1, 1000, False) # 1 second >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) >>> ax1.plot(t, sig) >>> ax1.set_title('10 Hz and 20 Hz sinusoids') >>> ax1.axis([0, 1, -2, 2]) Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and apply it to the signal. 
(It's recommended to use second-order sections format when filtering, to avoid numerical error with transfer function (``ba``) format): >>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos') >>> filtered = signal.sosfilt(sos, sig) >>> ax2.plot(t, filtered) >>> ax2.set_title('After 15 Hz high-pass filter') >>> ax2.axis([0, 1, -2, 2]) >>> ax2.set_xlabel('Time [seconds]') >>> plt.tight_layout() >>> plt.show() """ return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter', fs=fs) def cheby1(N, rp, Wn, btype='low', analog=False, output='ba', fs=None): """ Chebyshev type I digital and analog filter design. Design an Nth-order digital or analog Chebyshev type I filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. rp : float The maximum ripple allowed below unity gain in the passband. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For Type I filters, this is the point in the transition band at which the gain first drops below -`rp`. For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g., rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. See Also -------- cheb1ord, cheb1ap Notes ----- The Chebyshev type I filter maximizes the rate of cutoff between the frequency response's passband and stopband, at the expense of ripple in the passband and increased ringing in the step response. Type I filters roll off faster than Type II (`cheby2`), but Type II filters do not have any ripple in the passband. The equiripple passband has N maxima or minima (for example, a 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is unity for odd-order filters, or -rp dB for even-order filters. The ``'sos'`` output parameter was added in 0.16.0. 
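As a quick numerical check of the even-order DC gain mentioned above (an illustrative sketch; the particular order, ripple and cutoff are arbitrary):

>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.cheby1(4, 3, 0.5)           # even order, rp = 3 dB
>>> dc_gain_db = 20 * np.log10(abs(np.sum(b) / np.sum(a)))
>>> bool(np.isclose(dc_gain_db, -3.0))
True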
Examples -------- Design an analog filter and plot its frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev Type I frequency response (rp=5)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-5, color='green') # rp >>> plt.show() Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz >>> t = np.linspace(0, 1, 1000, False) # 1 second >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) >>> ax1.plot(t, sig) >>> ax1.set_title('10 Hz and 20 Hz sinusoids') >>> ax1.axis([0, 1, -2, 2]) Design a digital high-pass filter at 15 Hz to remove the 10 Hz tone, and apply it to the signal. (It's recommended to use second-order sections format when filtering, to avoid numerical error with transfer function (``ba``) format): >>> sos = signal.cheby1(10, 1, 15, 'hp', fs=1000, output='sos') >>> filtered = signal.sosfilt(sos, sig) >>> ax2.plot(t, filtered) >>> ax2.set_title('After 15 Hz high-pass filter') >>> ax2.axis([0, 1, -2, 2]) >>> ax2.set_xlabel('Time [seconds]') >>> plt.tight_layout() >>> plt.show() """ return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1', fs=fs) def cheby2(N, rs, Wn, btype='low', analog=False, output='ba', fs=None): """ Chebyshev type II digital and analog filter design. Design an Nth-order digital or analog Chebyshev type II filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. rs : float The minimum attenuation required in the stop band. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For Type II filters, this is the point in the transition band at which the gain first reaches -`rs`. For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g., rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. 
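A minimal sketch of what the `Wn`/`rs` specification described above means in practice (the order, attenuation and edge frequency below are arbitrary example values):

>>> import numpy as np
>>> from scipy import signal
>>> sos = signal.cheby2(4, 40, 0.3, output='sos')
>>> w, h = signal.sosfreqz(sos, worN=[0.3 * np.pi])   # response at Wn
>>> bool(np.isclose(20 * np.log10(abs(h[0])), -40.0, atol=1e-6))
True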
See Also -------- cheb2ord, cheb2ap Notes ----- The Chebyshev type II filter maximizes the rate of cutoff between the frequency response's passband and stopband, at the expense of ripple in the stopband and increased ringing in the step response. Type II filters do not roll off as fast as Type I (`cheby1`). The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Design an analog filter and plot its frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev Type II frequency response (rs=40)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-40, color='green') # rs >>> plt.show() Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz >>> t = np.linspace(0, 1, 1000, False) # 1 second >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) >>> ax1.plot(t, sig) >>> ax1.set_title('10 Hz and 20 Hz sinusoids') >>> ax1.axis([0, 1, -2, 2]) Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and apply it to the signal. (It's recommended to use second-order sections format when filtering, to avoid numerical error with transfer function (``ba``) format): >>> sos = signal.cheby2(12, 20, 17, 'hp', fs=1000, output='sos') >>> filtered = signal.sosfilt(sos, sig) >>> ax2.plot(t, filtered) >>> ax2.set_title('After 17 Hz high-pass filter') >>> ax2.axis([0, 1, -2, 2]) >>> ax2.set_xlabel('Time [seconds]') >>> plt.show() """ return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2', fs=fs) def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba', fs=None): """ Elliptic (Cauer) digital and analog filter design. Design an Nth-order digital or analog elliptic filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. rp : float The maximum ripple allowed below unity gain in the passband. Specified in decibels, as a positive number. rs : float The minimum attenuation required in the stop band. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For elliptic filters, this is the point in the transition band at which the gain first drops below -`rp`. For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g., rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba' for backwards compatibility, but 'sos' should be used for general-purpose filtering. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. 
z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. See Also -------- ellipord, ellipap Notes ----- Also known as Cauer or Zolotarev filters, the elliptical filter maximizes the rate of transition between the frequency response's passband and stopband, at the expense of ripple in both, and increased ringing in the step response. As `rp` approaches 0, the elliptical filter becomes a Chebyshev type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev type I filter (`cheby1`). As both approach 0, it becomes a Butterworth filter (`butter`). The equiripple passband has N maxima or minima (for example, a 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is unity for odd-order filters, or -rp dB for even-order filters. The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Design an analog filter and plot its frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-40, color='green') # rs >>> plt.axhline(-5, color='green') # rp >>> plt.show() Generate a signal made up of 10 Hz and 20 Hz, sampled at 1 kHz >>> t = np.linspace(0, 1, 1000, False) # 1 second >>> sig = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*20*t) >>> fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) >>> ax1.plot(t, sig) >>> ax1.set_title('10 Hz and 20 Hz sinusoids') >>> ax1.axis([0, 1, -2, 2]) Design a digital high-pass filter at 17 Hz to remove the 10 Hz tone, and apply it to the signal. (It's recommended to use second-order sections format when filtering, to avoid numerical error with transfer function (``ba``) format): >>> sos = signal.ellip(8, 1, 100, 17, 'hp', fs=1000, output='sos') >>> filtered = signal.sosfilt(sos, sig) >>> ax2.plot(t, filtered) >>> ax2.set_title('After 17 Hz high-pass filter') >>> ax2.axis([0, 1, -2, 2]) >>> ax2.set_xlabel('Time [seconds]') >>> plt.tight_layout() >>> plt.show() """ return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic', fs=fs) def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase', fs=None): """ Bessel/Thomson digital and analog filter design. Design an Nth-order digital or analog Bessel filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies (defined by the `norm` parameter). For analog filters, `Wn` is an angular frequency (e.g., rad/s). For digital filters, `Wn` are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`Wn` is thus in half-cycles / sample.) btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. (See Notes.) 
output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. norm : {'phase', 'delay', 'mag'}, optional Critical frequency normalization: ``phase`` The filter is normalized such that the phase response reaches its midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for both low-pass and high-pass filters, so this is the "phase-matched" case. The magnitude response asymptotes are the same as a Butterworth filter of the same order with a cutoff of `Wn`. This is the default, and matches MATLAB's implementation. ``delay`` The filter is normalized such that the group delay in the passband is 1/`Wn` (e.g., seconds). This is the "natural" type obtained by solving Bessel polynomials. ``mag`` The filter is normalized such that the gain magnitude is -3 dB at angular frequency `Wn`. .. versionadded:: 0.18.0 fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output='sos'``. Notes ----- Also known as a Thomson filter, the analog Bessel filter has maximally flat group delay and maximally linear phase response, with very little ringing in the step response. [1]_ The Bessel is inherently an analog filter. This function generates digital Bessel filters using the bilinear transform, which does not preserve the phase response of the analog filter. As such, it is only approximately correct at frequencies below about fs/4. To get maximally-flat group delay at higher frequencies, the analog Bessel filter must be transformed using phase-preserving techniques. See `besselap` for implementation details and references. The ``'sos'`` output parameter was added in 0.16.0. References ---------- .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency Characteristics", Proceedings of the Institution of Electrical Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. 
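Because the bilinear transform only approximates the analog response (see the Notes above), it can be useful to gauge the deviation directly. A rough sketch, with an assumed sample rate of 1000 Hz and a 100 Hz cutoff (both arbitrary for this illustration):

>>> import numpy as np
>>> from scipy import signal
>>> bd, ad = signal.bessel(4, 100, 'low', fs=1000)
>>> ba, aa = signal.bessel(4, 2 * np.pi * 100, 'low', analog=True)
>>> wd, hd = signal.freqz(bd, ad, worN=512, fs=1000)
>>> wa, ha = signal.freqs(ba, aa, worN=2 * np.pi * wd[1:])
>>> dev_db = 20 * np.log10(np.abs(hd[1:])) - 20 * np.log10(np.abs(ha))

The deviation stays small well below ``fs/4`` and grows towards the Nyquist frequency.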
Examples -------- Plot the phase-normalized frequency response, showing the relationship to the Butterworth's cutoff frequency (green): >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> b, a = signal.butter(4, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed') >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase') >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) >>> plt.title('Bessel filter magnitude response (with Butterworth)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.show() and the phase midpoint: >>> plt.figure() >>> plt.semilogx(w, np.unwrap(np.angle(h))) >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-np.pi, color='red') # phase midpoint >>> plt.title('Bessel filter phase response') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Phase [radians]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.show() Plot the magnitude-normalized frequency response, showing the -3 dB cutoff: >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag') >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) >>> plt.axhline(-3, color='red') # -3 dB magnitude >>> plt.axvline(10, color='green') # cutoff frequency >>> plt.title('Magnitude-normalized Bessel filter frequency response') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.show() Plot the delay-normalized filter, showing the maximally-flat group delay at 0.1 seconds: >>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay') >>> w, h = signal.freqs(b, a) >>> plt.figure() >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w)) >>> plt.axhline(0.1, color='red') # 0.1 seconds group delay >>> plt.title('Bessel filter group delay') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Group delay [seconds]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.show() """ return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel_'+norm, fs=fs) def maxflat(): pass def yulewalk(): pass def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): """ Band Stop Objective Function for order minimization. Returns the non-integer order for an analog band stop filter. Parameters ---------- wp : scalar Edge of passband `passb`. ind : int, {0, 1} Index specifying which `passb` edge to vary (0 or 1). passb : ndarray Two element sequence of fixed passband edges. stopb : ndarray Two element sequence of fixed stopband edges. gstop : float Amount of attenuation in stopband in dB. gpass : float Amount of ripple in the passband in dB. type : {'butter', 'cheby', 'ellip'} Type of filter. Returns ------- n : scalar Filter order (possibly non-integer). 
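This helper is normally driven by the ``*ord`` order-selection routines (via `scipy.optimize.fminbound`); a minimal sketch of a direct call is shown below. The band edges are arbitrary, already-prewarped values chosen only for illustration, and the function is assumed to be available in the current namespace (as it is within this module):

>>> import numpy as np
>>> passb = np.array([0.2, 0.5]) * np.pi
>>> stopb = np.array([0.3, 0.4]) * np.pi
>>> n = band_stop_obj(passb[0], 0, passb, stopb, gpass=1, gstop=40,
...                   type='butter')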
""" _validate_gpass_gstop(gpass, gstop) passbC = passb.copy() passbC[ind] = wp nat = (stopb * (passbC[0] - passbC[1]) / (stopb ** 2 - passbC[0] * passbC[1])) nat = min(abs(nat)) if type == 'butter': GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))) elif type == 'cheby': GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) elif type == 'ellip': GSTOP = 10 ** (0.1 * gstop) GPASS = 10 ** (0.1 * gpass) arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) arg0 = 1.0 / nat d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) n = (d0[0] * d1[1] / (d0[1] * d1[0])) else: raise ValueError("Incorrect type: %s" % type) return n def _pre_warp(wp, ws, analog): # Pre-warp frequencies for digital filter design if not analog: passb = np.tan(pi * wp / 2.0) stopb = np.tan(pi * ws / 2.0) else: passb = wp * 1.0 stopb = ws * 1.0 return passb, stopb def _validate_wp_ws(wp, ws, fs, analog): wp = atleast_1d(wp) ws = atleast_1d(ws) if fs is not None: if analog: raise ValueError("fs cannot be specified for an analog filter") wp = 2 * wp / fs ws = 2 * ws / fs filter_type = 2 * (len(wp) - 1) + 1 if wp[0] >= ws[0]: filter_type += 1 return wp, ws, filter_type def _find_nat_freq(stopb, passb, gpass, gstop, filter_type, filter_kind): if filter_type == 1: # low nat = stopb / passb elif filter_type == 2: # high nat = passb / stopb elif filter_type == 3: # stop ### breakpoint() wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, args=(0, passb, stopb, gpass, gstop, filter_kind), disp=0) passb[0] = wp0 wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], args=(1, passb, stopb, gpass, gstop, filter_kind), disp=0) passb[1] = wp1 nat = ((stopb * (passb[0] - passb[1])) / (stopb ** 2 - passb[0] * passb[1])) elif filter_type == 4: # pass nat = ((stopb ** 2 - passb[0] * passb[1]) / (stopb * (passb[0] - passb[1]))) else: raise ValueError(f"should not happen: {filter_type =}.") nat = min(abs(nat)) return nat, passb def _postprocess_wn(WN, analog, fs): wn = WN if analog else np.arctan(WN) * 2.0 / pi if len(wn) == 1: wn = wn[0] if fs is not None: wn = wn * fs / 2 return wn def buttord(wp, ws, gpass, gstop, analog=False, fs=None): """Butterworth filter order selection. Return the order of the lowest order digital or analog Butterworth filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- ord : int The lowest order for a Butterworth filter which meets specs. wn : ndarray or float The Butterworth natural frequency (i.e. 
the "3dB frequency"). Should be used with `butter` to give filter results. If `fs` is specified, this is in the same units, and `fs` must also be passed to `butter`. See Also -------- butter : Filter design using order and critical points cheb1ord : Find order and critical points from passband and stopband spec cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design an analog bandpass filter with passband within 3 dB from 20 to 50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s. Plot its frequency response, showing the passband and stopband constraints in gray. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True) >>> b, a = signal.butter(N, Wn, 'band', True) >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500)) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Butterworth bandpass filter fit to constraints') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop >>> plt.axis([10, 100, -60, 3]) >>> plt.show() """ _validate_gpass_gstop(gpass, gstop) wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) passb, stopb = _pre_warp(wp, ws, analog) nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'butter') GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))) # Find the Butterworth natural frequency WN (or the "3dB" frequency") # to give exactly gpass at passb. try: W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord)) except ZeroDivisionError: W0 = 1.0 warnings.warn("Order is zero...check input parameters.", RuntimeWarning, 2) # now convert this frequency back from lowpass prototype # to the original analog filter if filter_type == 1: # low WN = W0 * passb elif filter_type == 2: # high WN = passb / W0 elif filter_type == 3: # stop WN = numpy.empty(2, float) discr = sqrt((passb[1] - passb[0]) ** 2 + 4 * W0 ** 2 * passb[0] * passb[1]) WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) WN = numpy.sort(abs(WN)) elif filter_type == 4: # pass W0 = numpy.array([-W0, W0], float) WN = (-W0 * (passb[1] - passb[0]) / 2.0 + sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + passb[0] * passb[1])) WN = numpy.sort(abs(WN)) else: raise ValueError("Bad type: %s" % filter_type) wn = _postprocess_wn(WN, analog, fs) return ord, wn def cheb1ord(wp, ws, gpass, gstop, analog=False, fs=None): """Chebyshev type I filter order selection. Return the order of the lowest order digital or analog Chebyshev Type I filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`wp` and `ws` are thus in half-cycles / sample.) 
For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- ord : int The lowest order for a Chebyshev type I filter that meets specs. wn : ndarray or float The Chebyshev natural frequency (the "3dB frequency") for use with `cheby1` to give filter results. If `fs` is specified, this is in the same units, and `fs` must also be passed to `cheby1`. See Also -------- cheby1 : Filter design using order and critical points buttord : Find order and critical points from passband and stopband spec cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design a digital lowpass filter such that the passband is within 3 dB up to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its frequency response, showing the passband and stopband constraints in gray. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40) >>> b, a = signal.cheby1(N, 3, Wn, 'low') >>> w, h = signal.freqz(b, a) >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev I lowpass filter fit to constraints') >>> plt.xlabel('Normalized frequency') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass >>> plt.axis([0.08, 1, -60, 3]) >>> plt.show() """ _validate_gpass_gstop(gpass, gstop) wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) passb, stopb = _pre_warp(wp, ws, analog) nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby') GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) v_pass_stop = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) ord = int(ceil(v_pass_stop / np.arccosh(nat))) # Natural frequencies are just the passband edges wn = _postprocess_wn(passb, analog, fs) return ord, wn def cheb2ord(wp, ws, gpass, gstop, analog=False, fs=None): """Chebyshev type II filter order selection. Return the order of the lowest order digital or analog Chebyshev Type II filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. 
fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- ord : int The lowest order for a Chebyshev type II filter that meets specs. wn : ndarray or float The Chebyshev natural frequency (the "3dB frequency") for use with `cheby2` to give filter results. If `fs` is specified, this is in the same units, and `fs` must also be passed to `cheby2`. See Also -------- cheby2 : Filter design using order and critical points buttord : Find order and critical points from passband and stopband spec cheb1ord, ellipord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above 0.6*(fs/2). Plot its frequency response, showing the passband and stopband constraints in gray. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) >>> b, a = signal.cheby2(N, 60, Wn, 'stop') >>> w, h = signal.freqz(b, a) >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev II bandstop filter fit to constraints') >>> plt.xlabel('Normalized frequency') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop >>> plt.axis([0.06, 1, -80, 3]) >>> plt.show() """ _validate_gpass_gstop(gpass, gstop) wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) passb, stopb = _pre_warp(wp, ws, analog) nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'cheby') GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) v_pass_stop = np.arccosh(np.sqrt((GSTOP - 1.0) / (GPASS - 1.0))) ord = int(ceil(v_pass_stop / arccosh(nat))) # Find frequency where analog response is -gpass dB. # Then convert back from low-pass prototype to the original filter. new_freq = cosh(1.0 / ord * v_pass_stop) new_freq = 1.0 / new_freq if filter_type == 1: nat = passb / new_freq elif filter_type == 2: nat = passb * new_freq elif filter_type == 3: nat = numpy.empty(2, float) nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) + sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 + passb[1] * passb[0])) nat[1] = passb[1] * passb[0] / nat[0] elif filter_type == 4: nat = numpy.empty(2, float) nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) + sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) + passb[1] * passb[0])) nat[1] = passb[0] * passb[1] / nat[0] wn = _postprocess_wn(nat, analog, fs) return ord, wn _POW10_LOG10 = np.log(10) def _pow10m1(x): """10 ** x - 1 for x near 0""" return np.expm1(_POW10_LOG10 * x) def ellipord(wp, ws, gpass, gstop, analog=False, fs=None): """Elliptic (Cauer) filter order selection. Return the order of the lowest order digital or analog elliptic filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are in the same units as `fs`. By default, `fs` is 2 half-cycles/sample, so these are normalized from 0 to 1, where 1 is the Nyquist frequency. (`wp` and `ws` are thus in half-cycles / sample.) 
For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g., rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- ord : int The lowest order for an Elliptic (Cauer) filter that meets specs. wn : ndarray or float The Chebyshev natural frequency (the "3dB frequency") for use with `ellip` to give filter results. If `fs` is specified, this is in the same units, and `fs` must also be passed to `ellip`. See Also -------- ellip : Filter design using order and critical points buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design an analog highpass filter such that the passband is within 3 dB above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its frequency response, showing the passband and stopband constraints in gray. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> N, Wn = signal.ellipord(30, 10, 3, 60, True) >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True) >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500)) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Elliptical highpass filter fit to constraints') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop >>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass >>> plt.axis([1, 300, -80, 3]) >>> plt.show() """ _validate_gpass_gstop(gpass, gstop) wp, ws, filter_type = _validate_wp_ws(wp, ws, fs, analog) passb, stopb = _pre_warp(wp, ws, analog) nat, passb = _find_nat_freq(stopb, passb, gpass, gstop, filter_type, 'ellip') arg1_sq = _pow10m1(0.1 * gpass) / _pow10m1(0.1 * gstop) arg0 = 1.0 / nat d0 = special.ellipk(arg0 ** 2), special.ellipkm1(arg0 ** 2) d1 = special.ellipk(arg1_sq), special.ellipkm1(arg1_sq) ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) wn = _postprocess_wn(passb, analog, fs) return ord, wn def buttap(N): """Return (z,p,k) for analog prototype of Nth-order Butterworth filter. The filter will have an angular (e.g., rad/s) cutoff frequency of 1. See Also -------- butter : Filter design function using this prototype """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") z = numpy.array([]) m = numpy.arange(-N+1, N, 2) # Middle value is 0 to ensure an exactly real pole p = -numpy.exp(1j * pi * m / (2 * N)) k = 1 return z, p, k def cheb1ap(N, rp): """ Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. The returned filter prototype has `rp` decibels of ripple in the passband. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. 
See Also -------- cheby1 : Filter design function using this prototype """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero error # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) z = numpy.array([]) # Ripple factor (epsilon) eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0) mu = 1.0 / N * arcsinh(1 / eps) # Arrange poles in an ellipse on the left half of the S-plane m = numpy.arange(-N+1, N, 2) theta = pi * m / (2*N) p = -sinh(mu + 1j*theta) k = numpy.prod(-p, axis=0).real if N % 2 == 0: k = k / sqrt(1 + eps * eps) return z, p, k def cheb2ap(N, rs): """ Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter. The returned filter prototype has `rs` decibels of ripple in the stopband. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first reaches ``-rs``. See Also -------- cheby2 : Filter design function using this prototype """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero warning return numpy.array([]), numpy.array([]), 1 # Ripple factor (epsilon) de = 1.0 / sqrt(10 ** (0.1 * rs) - 1) mu = arcsinh(1.0 / de) / N if N % 2: m = numpy.concatenate((numpy.arange(-N+1, 0, 2), numpy.arange(2, N, 2))) else: m = numpy.arange(-N+1, N, 2) z = -conjugate(1j / sin(m * pi / (2.0 * N))) # Poles around the unit circle like Butterworth p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N)) # Warp into Chebyshev II p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag p = 1.0 / p k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real return z, p, k EPSILON = 2e-16 # number of terms in solving degree equation _ELLIPDEG_MMAX = 7 def _ellipdeg(n, m1): """Solve degree equation using nomes Given n, m1, solve n * K(m) / K'(m) = K1(m1) / K1'(m1) for m See [1], Eq. (49) References ---------- .. [1] Orfanidis, "Lecture Notes on Elliptic Filter Design", https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf """ K1 = special.ellipk(m1) K1p = special.ellipkm1(m1) q1 = np.exp(-np.pi * K1p / K1) q = q1 ** (1/n) mnum = np.arange(_ELLIPDEG_MMAX + 1) mden = np.arange(1, _ELLIPDEG_MMAX + 2) num = np.sum(q ** (mnum * (mnum+1))) den = 1 + 2 * np.sum(q ** (mden**2)) return 16 * q * (num / den) ** 4 # Maximum number of iterations in Landen transformation recursion # sequence. 10 is conservative; unit tests pass with 4, Orfanidis # (see _arc_jac_cn [1]) suggests 5. _ARC_JAC_SN_MAXITER = 10 def _arc_jac_sn(w, m): """Inverse Jacobian elliptic sn Solve for z in w = sn(z, m) Parameters ---------- w : complex scalar argument m : scalar modulus; in interval [0, 1] See [1], Eq. (56) References ---------- ..
[1] Orfanidis, "Lecture Notes on Elliptic Filter Design", https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf """ def _complement(kx): # (1-k**2) ** 0.5; the expression below # works for small kx return ((1 - kx) * (1 + kx)) ** 0.5 k = m ** 0.5 if k > 1: return np.nan elif k == 1: return np.arctanh(w) ks = [k] niter = 0 while ks[-1] != 0: k_ = ks[-1] k_p = _complement(k_) ks.append((1 - k_p) / (1 + k_p)) niter += 1 if niter > _ARC_JAC_SN_MAXITER: raise ValueError('Landen transformation not converging') K = np.prod(1 + np.array(ks[1:])) * np.pi/2 wns = [w] for kn, knext in zip(ks[:-1], ks[1:]): wn = wns[-1] wnext = (2 * wn / ((1 + knext) * (1 + _complement(kn * wn)))) wns.append(wnext) u = 2 / np.pi * np.arcsin(wns[-1]) z = K * u return z def _arc_jac_sc1(w, m): """Real inverse Jacobian sc, with complementary modulus Solve for z in w = sc(z, 1-m) w - real scalar m - modulus From [1], sc(z, m) = -i * sn(i * z, 1 - m) References ---------- # noqa: E501 .. [1] https://functions.wolfram.com/EllipticFunctions/JacobiSC/introductions/JacobiPQs/ShowAll.html, "Representations through other Jacobi functions" """ zcomplex = _arc_jac_sn(1j * w, m) if abs(zcomplex.real) > 1e-14: raise ValueError return zcomplex.imag def ellipap(N, rp, rs): """Return (z,p,k) of Nth-order elliptic analog lowpass filter. The filter is a normalized prototype that has `rp` decibels of ripple in the passband and a stopband `rs` decibels down. The filter's angular (e.g., rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- ellip : Filter design function using this prototype References ---------- .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5 and 12. .. [2] Orfanidis, "Lecture Notes on Elliptic Filter Design", https://www.ece.rutgers.edu/~orfanidi/ece521/notes.pdf """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero warning # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) elif N == 1: p = -sqrt(1.0 / _pow10m1(0.1 * rp)) k = -p z = [] return asarray(z), asarray(p), k eps_sq = _pow10m1(0.1 * rp) eps = np.sqrt(eps_sq) ck1_sq = eps_sq / _pow10m1(0.1 * rs) if ck1_sq == 0: raise ValueError("Cannot design a filter with given rp and rs" " specifications.") val = special.ellipk(ck1_sq), special.ellipkm1(ck1_sq) m = _ellipdeg(N, ck1_sq) capk = special.ellipk(m) j = numpy.arange(1 - N % 2, N, 2) jj = len(j) [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) z = 1.0 / (sqrt(m) * snew) z = 1j * z z = numpy.concatenate((z, conjugate(z))) r = _arc_jac_sc1(1. / eps, ck1_sq) v0 = capk * r / (N * val[0]) [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) if N % 2: newp = numpy.compress(abs(p.imag) > EPSILON * numpy.sqrt(numpy.sum(p * numpy.conjugate(p), axis=0).real), p, axis=-1) p = numpy.concatenate((p, conjugate(newp))) else: p = numpy.concatenate((p, conjugate(p))) k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real if N % 2 == 0: k = k / numpy.sqrt(1 + eps_sq) return z, p, k # TODO: Make this a real public function scipy.misc.ff def _falling_factorial(x, n): r""" Return the factorial of `x` to the `n` falling. This is defined as: .. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1) This can more efficiently calculate ratios of factorials, since: n!/m! 
== falling_factorial(n, n-m) where n >= m skipping the factors that cancel out the usual factorial n! == ff(n, n) """ val = 1 for k in range(x - n + 1, x + 1): val *= k return val def _bessel_poly(n, reverse=False): """ Return the coefficients of Bessel polynomial of degree `n` If `reverse` is true, a reverse Bessel polynomial is output. Output is a list of coefficients: [1] = 1 [1, 1] = 1*s + 1 [1, 3, 3] = 1*s^2 + 3*s + 3 [1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15 [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105 etc. Output is a Python list of arbitrary precision long ints, so n is only limited by your hardware's memory. Sequence is http://oeis.org/A001498, and output can be confirmed to match http://oeis.org/A001498/b001498.txt : >>> i = 0 >>> for n in range(51): ... for x in _bessel_poly(n, reverse=True): ... print(i, x) ... i += 1 """ if abs(int(n)) != n: raise ValueError("Polynomial order must be a nonnegative integer") else: n = int(n) # np.int32 doesn't work, for instance out = [] for k in range(n + 1): num = _falling_factorial(2*n - k, n) den = 2**(n - k) * math.factorial(k) out.append(num // den) if reverse: return out[::-1] else: return out def _campos_zeros(n): """ Return approximate zero locations of Bessel polynomials y_n(x) for order `n` using polynomial fit (Campos-Calderon 2011) """ if n == 1: return asarray([-1+0j]) s = npp_polyval(n, [0, 0, 2, 0, -3, 1]) b3 = npp_polyval(n, [16, -8]) / s b2 = npp_polyval(n, [-24, -12, 12]) / s b1 = npp_polyval(n, [8, 24, -12, -2]) / s b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s r = npp_polyval(n, [0, 0, 2, 1]) a1 = npp_polyval(n, [-6, -6]) / r a2 = 6 / r k = np.arange(1, n+1) x = npp_polyval(k, [0, a1, a2]) y = npp_polyval(k, [b0, b1, b2, b3]) return x + 1j*y def _aberth(f, fp, x0, tol=1e-15, maxiter=50): """ Given a function `f`, its first derivative `fp`, and a set of initial guesses `x0`, simultaneously find the roots of the polynomial using the Aberth-Ehrlich method. ``len(x0)`` should equal the number of roots of `f`. (This is not a complete implementation of Bini's algorithm.) """ N = len(x0) x = array(x0, complex) beta = np.empty_like(x0) for iteration in range(maxiter): alpha = -f(x) / fp(x) # Newton's method # Model "repulsion" between zeros for k in range(N): beta[k] = np.sum(1/(x[k] - x[k+1:])) beta[k] += np.sum(1/(x[k] - x[:k])) x += alpha / (1 + alpha * beta) if not all(np.isfinite(x)): raise RuntimeError('Root-finding calculation failed') # Mekwi: The iterative process can be stopped when |hn| has become # less than the largest error one is willing to permit in the root. 
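# In this implementation that error term is `alpha`, the per-root Newton correction -f(x)/fp(x) computed above (not the damped Aberth step actually applied); iteration stops once every |alpha| is at or below `tol`.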
if all(abs(alpha) <= tol): break else: raise Exception('Zeros failed to converge') return x def _bessel_zeros(N): """ Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of modified Bessel function of the second kind """ if N == 0: return asarray([]) # Generate starting points x0 = _campos_zeros(N) # Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary # Bessel polynomial y_N(x) def f(x): return special.kve(N+0.5, 1/x) # First derivative of above def fp(x): return (special.kve(N-0.5, 1/x)/(2*x**2) - special.kve(N+0.5, 1/x)/(x**2) + special.kve(N+1.5, 1/x)/(2*x**2)) # Starting points converge to true zeros x = _aberth(f, fp, x0) # Improve precision using Newton's method on each for i in range(len(x)): x[i] = optimize.newton(f, x[i], fp, tol=1e-15) # Average complex conjugates to make them exactly symmetrical x = np.mean((x, x[::-1].conj()), 0) # Zeros should sum to -1 if abs(np.sum(x) + 1) > 1e-15: raise RuntimeError('Generated zeros are inaccurate') return x def _norm_factor(p, k): """ Numerically find frequency shift to apply to delay-normalized filter such that -3 dB point is at 1 rad/sec. `p` is an array_like of polynomial poles `k` is a float gain First 10 values are listed in "Bessel Scale Factors" table, "Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond." """ p = asarray(p, dtype=complex) def G(w): """ Gain of filter """ return abs(k / prod(1j*w - p)) def cutoff(w): """ When gain = -3 dB, return 0 """ return G(w) - 1/np.sqrt(2) return optimize.newton(cutoff, 1.5) def besselap(N, norm='phase'): """ Return (z,p,k) for analog prototype of an Nth-order Bessel filter. Parameters ---------- N : int The order of the filter. norm : {'phase', 'delay', 'mag'}, optional Frequency normalization: ``phase`` The filter is normalized such that the phase response reaches its midpoint at an angular (e.g., rad/s) cutoff frequency of 1. This happens for both low-pass and high-pass filters, so this is the "phase-matched" case. [6]_ The magnitude response asymptotes are the same as a Butterworth filter of the same order with a cutoff of `Wn`. This is the default, and matches MATLAB's implementation. ``delay`` The filter is normalized such that the group delay in the passband is 1 (e.g., 1 second). This is the "natural" type obtained by solving Bessel polynomials ``mag`` The filter is normalized such that the gain magnitude is -3 dB at angular frequency 1. This is called "frequency normalization" by Bond. [1]_ .. versionadded:: 0.18.0 Returns ------- z : ndarray Zeros of the transfer function. Is always an empty array. p : ndarray Poles of the transfer function. k : scalar Gain of the transfer function. For phase-normalized, this is always 1. See Also -------- bessel : Filter design function using this prototype Notes ----- To find the pole locations, approximate starting points are generated [2]_ for the zeros of the ordinary Bessel polynomial [3]_, then the Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to calculate more accurate zeros, and these locations are then inverted about the unit circle. References ---------- .. [1] C.R. Bond, "Bessel Filter Constants", http://www.crbond.com/papers/bsf.pdf .. [2] Campos and Calderon, "Approximate closed-form formulas for the zeros of the Bessel Polynomials", :arXiv:`1105.0957`. .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency Characteristics", Proceedings of the Institution of Electrical Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. .. 
[4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial Simultaneously", Mathematics of Computation, Vol. 27, No. 122, April 1973 .. [5] Ehrlich, "A modified Newton method for polynomials", Communications of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967, :DOI:`10.1145/363067.363115` .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to Others", RaneNote 147, 1998, https://www.ranecommercial.com/legacy/note147.html """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") N = int(N) # calculation below doesn't always fit in np.int64 if N == 0: p = [] k = 1 else: # Find roots of reverse Bessel polynomial p = 1/_bessel_zeros(N) a_last = _falling_factorial(2*N, N) // 2**N # Shift them to a different normalization if required if norm in ('delay', 'mag'): # Normalized for group delay of 1 k = a_last if norm == 'mag': # -3 dB magnitude point is at 1 rad/sec norm_factor = _norm_factor(p, k) p /= norm_factor k = norm_factor**-N * a_last elif norm == 'phase': # Phase-matched (1/2 max phase shift at 1 rad/sec) # Asymptotes are same as Butterworth filter p *= 10**(-math.log10(a_last)/N) k = 1 else: raise ValueError('normalization not understood') return asarray([]), asarray(p, dtype=complex), float(k) def iirnotch(w0, Q, fs=2.0): """ Design second-order IIR notch digital filter. A notch filter is a band-stop filter with a narrow bandwidth (high quality factor). It rejects a narrow frequency band and leaves the rest of the spectrum little changed. Parameters ---------- w0 : float Frequency to remove from a signal. If `fs` is specified, this is in the same units as `fs`. By default, it is a normalized scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes notch filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. See Also -------- iirpeak Notes ----- .. versionadded:: 0.19.0 References ---------- .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", Prentice-Hall, 1996 Examples -------- Design and plot filter to remove the 60 Hz component from a signal sampled at 200 Hz, using a quality factor Q = 30 >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> fs = 200.0 # Sample frequency (Hz) >>> f0 = 60.0 # Frequency to be removed from signal (Hz) >>> Q = 30.0 # Quality factor >>> # Design notch filter >>> b, a = signal.iirnotch(f0, Q, fs) >>> # Frequency response >>> freq, h = signal.freqz(b, a, fs=fs) >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue') >>> ax[0].set_title("Frequency Response") >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') >>> ax[0].set_xlim([0, 100]) >>> ax[0].set_ylim([-25, 10]) >>> ax[0].grid(True) >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') >>> ax[1].set_ylabel("Angle (degrees)", color='green') >>> ax[1].set_xlabel("Frequency (Hz)") >>> ax[1].set_xlim([0, 100]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid(True) >>> plt.show() """ return _design_notch_peak_filter(w0, Q, "notch", fs) def iirpeak(w0, Q, fs=2.0): """ Design second-order IIR peak (resonant) digital filter. 
A peak filter is a band-pass filter with a narrow bandwidth (high quality factor). It rejects components outside a narrow frequency band. Parameters ---------- w0 : float Frequency to be retained in a signal. If `fs` is specified, this is in the same units as `fs`. By default, it is a normalized scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes peak filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0 Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. See Also -------- iirnotch Notes ----- .. versionadded:: 0.19.0 References ---------- .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", Prentice-Hall, 1996 Examples -------- Design and plot filter to remove the frequencies other than the 300 Hz component from a signal sampled at 1000 Hz, using a quality factor Q = 30 >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fs = 1000.0 # Sample frequency (Hz) >>> f0 = 300.0 # Frequency to be retained (Hz) >>> Q = 30.0 # Quality factor >>> # Design peak filter >>> b, a = signal.iirpeak(f0, Q, fs) >>> # Frequency response >>> freq, h = signal.freqz(b, a, fs=fs) >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') >>> ax[0].set_title("Frequency Response") >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') >>> ax[0].set_xlim([0, 500]) >>> ax[0].set_ylim([-50, 10]) >>> ax[0].grid(True) >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') >>> ax[1].set_ylabel("Angle (degrees)", color='green') >>> ax[1].set_xlabel("Frequency (Hz)") >>> ax[1].set_xlim([0, 500]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid(True) >>> plt.show() """ return _design_notch_peak_filter(w0, Q, "peak", fs) def _design_notch_peak_filter(w0, Q, ftype, fs=2.0): """ Design notch or peak digital filter. Parameters ---------- w0 : float Normalized frequency to remove from a signal. If `fs` is specified, this is in the same units as `fs`. By default, it is a normalized scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes notch filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. ftype : str The type of IIR filter to design: - notch filter : ``notch`` - peak filter : ``peak`` fs : float, optional The sampling frequency of the digital system. .. versionadded:: 1.2.0: Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. 
""" # Guarantee that the inputs are floats w0 = float(w0) Q = float(Q) w0 = 2*w0/fs # Checks if w0 is within the range if w0 > 1.0 or w0 < 0.0: raise ValueError("w0 should be such that 0 < w0 < 1") # Get bandwidth bw = w0/Q # Normalize inputs bw = bw*np.pi w0 = w0*np.pi # Compute -3dB attenuation gb = 1/np.sqrt(2) if ftype == "notch": # Compute beta: formula 11.3.4 (p.575) from reference [1] beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0) elif ftype == "peak": # Compute beta: formula 11.3.19 (p.579) from reference [1] beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0) else: raise ValueError("Unknown ftype.") # Compute gain: formula 11.3.6 (p.575) from reference [1] gain = 1.0/(1.0+beta) # Compute numerator b and denominator a # formulas 11.3.7 (p.575) and 11.3.21 (p.579) # from reference [1] if ftype == "notch": b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0]) else: b = (1.0-gain)*np.array([1.0, 0.0, -1.0]) a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)]) return b, a def iircomb(w0, Q, ftype='notch', fs=2.0, *, pass_zero=False): """ Design IIR notching or peaking digital comb filter. A notching comb filter consists of regularly-spaced band-stop filters with a narrow bandwidth (high quality factor). Each rejects a narrow frequency band and leaves the rest of the spectrum little changed. A peaking comb filter consists of regularly-spaced band-pass filters with a narrow bandwidth (high quality factor). Each rejects components outside a narrow frequency band. Parameters ---------- w0 : float The fundamental frequency of the comb filter (the spacing between its peaks). This must evenly divide the sampling frequency. If `fs` is specified, this is in the same units as `fs`. By default, it is a normalized scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes notch filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. ftype : {'notch', 'peak'} The type of comb filter generated by the function. If 'notch', then the Q factor applies to the notches. If 'peak', then the Q factor applies to the peaks. Default is 'notch'. fs : float, optional The sampling frequency of the signal. Default is 2.0. pass_zero : bool, optional If False (default), the notches (nulls) of the filter are centered on frequencies [0, w0, 2*w0, ...], and the peaks are centered on the midpoints [w0/2, 3*w0/2, 5*w0/2, ...]. If True, the peaks are centered on [0, w0, 2*w0, ...] (passing zero frequency) and vice versa. .. versionadded:: 1.9.0 Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. Raises ------ ValueError If `w0` is less than or equal to 0 or greater than or equal to ``fs/2``, if `fs` is not divisible by `w0`, if `ftype` is not 'notch' or 'peak' See Also -------- iirnotch iirpeak Notes ----- For implementation details, see [1]_. The TF implementation of the comb filter is numerically stable even at higher orders due to the use of a single repeated pole, which won't suffer from precision loss. References ---------- .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", Prentice-Hall, 1996, ch. 
11, "Digital Filter Design" Examples -------- Design and plot notching comb filter at 20 Hz for a signal sampled at 200 Hz, using quality factor Q = 30 >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> import numpy as np >>> fs = 200.0 # Sample frequency (Hz) >>> f0 = 20.0 # Frequency to be removed from signal (Hz) >>> Q = 30.0 # Quality factor >>> # Design notching comb filter >>> b, a = signal.iircomb(f0, Q, ftype='notch', fs=fs) >>> # Frequency response >>> freq, h = signal.freqz(b, a, fs=fs) >>> response = abs(h) >>> # To avoid divide by zero when graphing >>> response[response == 0] = 1e-20 >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True) >>> ax[0].plot(freq, 20*np.log10(abs(response)), color='blue') >>> ax[0].set_title("Frequency Response") >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') >>> ax[0].set_xlim([0, 100]) >>> ax[0].set_ylim([-30, 10]) >>> ax[0].grid(True) >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green') >>> ax[1].set_ylabel("Angle (degrees)", color='green') >>> ax[1].set_xlabel("Frequency (Hz)") >>> ax[1].set_xlim([0, 100]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid(True) >>> plt.show() Design and plot peaking comb filter at 250 Hz for a signal sampled at 1000 Hz, using quality factor Q = 30 >>> fs = 1000.0 # Sample frequency (Hz) >>> f0 = 250.0 # Frequency to be retained (Hz) >>> Q = 30.0 # Quality factor >>> # Design peaking filter >>> b, a = signal.iircomb(f0, Q, ftype='peak', fs=fs, pass_zero=True) >>> # Frequency response >>> freq, h = signal.freqz(b, a, fs=fs) >>> response = abs(h) >>> # To avoid divide by zero when graphing >>> response[response == 0] = 1e-20 >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6), sharex=True) >>> ax[0].plot(freq, 20*np.log10(np.maximum(abs(h), 1e-5)), color='blue') >>> ax[0].set_title("Frequency Response") >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') >>> ax[0].set_xlim([0, 500]) >>> ax[0].set_ylim([-80, 10]) >>> ax[0].grid(True) >>> ax[1].plot(freq, (np.angle(h)*180/np.pi+180)%360 - 180, color='green') >>> ax[1].set_ylabel("Angle (degrees)", color='green') >>> ax[1].set_xlabel("Frequency (Hz)") >>> ax[1].set_xlim([0, 500]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid(True) >>> plt.show() """ # Convert w0, Q, and fs to float w0 = float(w0) Q = float(Q) fs = float(fs) # Check for invalid cutoff frequency or filter type ftype = ftype.lower() if not 0 < w0 < fs / 2: raise ValueError("w0 must be between 0 and {}" " (nyquist), but given {}.".format(fs / 2, w0)) if ftype not in ('notch', 'peak'): raise ValueError('ftype must be either notch or peak.') # Compute the order of the filter N = round(fs / w0) # Check for cutoff frequency divisibility if abs(w0 - fs/N)/fs > 1e-14: raise ValueError('fs must be divisible by w0.') # Compute frequency in radians and filter bandwidth # Eq. 11.3.1 (p. 574) from reference [1] w0 = (2 * np.pi * w0) / fs w_delta = w0 / Q # Define base gain values depending on notch or peak filter # Compute -3dB attenuation # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1] if ftype == 'notch': G0, G = 1, 0 elif ftype == 'peak': G0, G = 0, 1 GB = 1 / np.sqrt(2) # Compute beta # Eq. 11.5.3 (p. 591) from reference [1] beta = np.sqrt((GB**2 - G0**2) / (G**2 - GB**2)) * np.tan(N * w_delta / 4) # Compute filter coefficients # Eq 11.5.1 (p. 
590) variables a, b, c from reference [1] ax = (1 - beta) / (1 + beta) bx = (G0 + G * beta) / (1 + beta) cx = (G0 - G * beta) / (1 + beta) # Last coefficients are negative to get peaking comb that passes zero or # notching comb that doesn't. negative_coef = ((ftype == 'peak' and pass_zero) or (ftype == 'notch' and not pass_zero)) # Compute numerator coefficients # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] # b - cz^-N or b + cz^-N b = np.zeros(N + 1) b[0] = bx if negative_coef: b[-1] = -cx else: b[-1] = +cx # Compute denominator coefficients # Eq 11.5.1 (p. 590) or Eq 11.5.4 (p. 591) from reference [1] # 1 - az^-N or 1 + az^-N a = np.zeros(N + 1) a[0] = 1 if negative_coef: a[-1] = -ax else: a[-1] = +ax return b, a def _hz_to_erb(hz): """ Utility for converting from frequency (Hz) to the Equivalent Rectangular Bandwidth (ERB) scale ERB = frequency / EarQ + minBW """ EarQ = 9.26449 minBW = 24.7 return hz / EarQ + minBW def gammatone(freq, ftype, order=None, numtaps=None, fs=None): """ Gammatone filter design. This function computes the coefficients of an FIR or IIR gammatone digital filter [1]_. Parameters ---------- freq : float Center frequency of the filter (expressed in the same units as `fs`). ftype : {'fir', 'iir'} The type of filter the function generates. If 'fir', the function will generate an Nth order FIR gammatone filter. If 'iir', the function will generate an 8th order digital IIR filter, modeled as as 4th order gammatone filter. order : int, optional The order of the filter. Only used when ``ftype='fir'``. Default is 4 to model the human auditory system. Must be between 0 and 24. numtaps : int, optional Length of the filter. Only used when ``ftype='fir'``. Default is ``fs*0.015`` if `fs` is greater than 1000, 15 if `fs` is less than or equal to 1000. fs : float, optional The sampling frequency of the signal. `freq` must be between 0 and ``fs/2``. Default is 2. Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the filter. Raises ------ ValueError If `freq` is less than or equal to 0 or greater than or equal to ``fs/2``, if `ftype` is not 'fir' or 'iir', if `order` is less than or equal to 0 or greater than 24 when ``ftype='fir'`` See Also -------- firwin iirfilter References ---------- .. [1] Slaney, Malcolm, "An Efficient Implementation of the Patterson-Holdsworth Auditory Filter Bank", Apple Computer Technical Report 35, 1993, pp.3-8, 34-39. 
Examples -------- 16-sample 4th order FIR Gammatone filter centered at 440 Hz >>> from scipy import signal >>> signal.gammatone(440, 'fir', numtaps=16, fs=16000) (array([ 0.00000000e+00, 2.22196719e-07, 1.64942101e-06, 4.99298227e-06, 1.01993969e-05, 1.63125770e-05, 2.14648940e-05, 2.29947263e-05, 1.76776931e-05, 2.04980537e-06, -2.72062858e-05, -7.28455299e-05, -1.36651076e-04, -2.19066855e-04, -3.18905076e-04, -4.33156712e-04]), [1.0]) IIR Gammatone filter centered at 440 Hz >>> import matplotlib.pyplot as plt >>> import numpy as np >>> b, a = signal.gammatone(440, 'iir', fs=16000) >>> w, h = signal.freqz(b, a) >>> plt.plot(w / ((2 * np.pi) / 16000), 20 * np.log10(abs(h))) >>> plt.xscale('log') >>> plt.title('Gammatone filter frequency response') >>> plt.xlabel('Frequency') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(440, color='green') # cutoff frequency >>> plt.show() """ # Converts freq to float freq = float(freq) # Set sampling rate if not passed if fs is None: fs = 2 fs = float(fs) # Check for invalid cutoff frequency or filter type ftype = ftype.lower() filter_types = ['fir', 'iir'] if not 0 < freq < fs / 2: raise ValueError("The frequency must be between 0 and {}" " (nyquist), but given {}.".format(fs / 2, freq)) if ftype not in filter_types: raise ValueError('ftype must be either fir or iir.') # Calculate FIR gammatone filter if ftype == 'fir': # Set order and numtaps if not passed if order is None: order = 4 order = operator.index(order) if numtaps is None: numtaps = max(int(fs * 0.015), 15) numtaps = operator.index(numtaps) # Check for invalid order if not 0 < order <= 24: raise ValueError("Invalid order: order must be > 0 and <= 24.") # Gammatone impulse response settings t = np.arange(numtaps) / fs bw = 1.019 * _hz_to_erb(freq) # Calculate the FIR gammatone filter b = (t ** (order - 1)) * np.exp(-2 * np.pi * bw * t) b *= np.cos(2 * np.pi * freq * t) # Scale the FIR filter so the frequency response is 1 at cutoff scale_factor = 2 * (2 * np.pi * bw) ** (order) scale_factor /= float_factorial(order - 1) scale_factor /= fs b *= scale_factor a = [1.0] # Calculate IIR gammatone filter elif ftype == 'iir': # Raise warning if order and/or numtaps is passed if order is not None: warnings.warn('order is not used for IIR gammatone filter.') if numtaps is not None: warnings.warn('numtaps is not used for IIR gammatone filter.') # Gammatone impulse response settings T = 1./fs bw = 2 * np.pi * 1.019 * _hz_to_erb(freq) fr = 2 * freq * np.pi * T bwT = bw * T # Calculate the gain to normalize the volume at the center frequency g1 = -2 * np.exp(2j * fr) * T g2 = 2 * np.exp(-(bwT) + 1j * fr) * T g3 = np.sqrt(3 + 2 ** (3 / 2)) * np.sin(fr) g4 = np.sqrt(3 - 2 ** (3 / 2)) * np.sin(fr) g5 = np.exp(2j * fr) g = g1 + g2 * (np.cos(fr) - g4) g *= (g1 + g2 * (np.cos(fr) + g4)) g *= (g1 + g2 * (np.cos(fr) - g3)) g *= (g1 + g2 * (np.cos(fr) + g3)) g /= ((-2 / np.exp(2 * bwT) - 2 * g5 + 2 * (1 + g5) / np.exp(bwT)) ** 4) g = np.abs(g) # Create empty filter coefficient lists b = np.empty(5) a = np.empty(9) # Calculate the numerator coefficients b[0] = (T ** 4) / g b[1] = -4 * T ** 4 * np.cos(fr) / np.exp(bw * T) / g b[2] = 6 * T ** 4 * np.cos(2 * fr) / np.exp(2 * bw * T) / g b[3] = -4 * T ** 4 * np.cos(3 * fr) / np.exp(3 * bw * T) / g b[4] = T ** 4 * np.cos(4 * fr) / np.exp(4 * bw * T) / g # Calculate the denominator coefficients a[0] = 1 a[1] = -8 * np.cos(fr) / np.exp(bw * T) a[2] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(2 * bw * T) a[3] = 
-8 * (6 * np.cos(fr) + np.cos(3 * fr)) a[3] /= np.exp(3 * bw * T) a[4] = 2 * (18 + 16 * np.cos(2 * fr) + np.cos(4 * fr)) a[4] /= np.exp(4 * bw * T) a[5] = -8 * (6 * np.cos(fr) + np.cos(3 * fr)) a[5] /= np.exp(5 * bw * T) a[6] = 4 * (4 + 3 * np.cos(2 * fr)) / np.exp(6 * bw * T) a[7] = -8 * np.cos(fr) / np.exp(7 * bw * T) a[8] = np.exp(-8 * bw * T) return b, a filter_dict = {'butter': [buttap, buttord], 'butterworth': [buttap, buttord], 'cauer': [ellipap, ellipord], 'elliptic': [ellipap, ellipord], 'ellip': [ellipap, ellipord], 'bessel': [besselap], 'bessel_phase': [besselap], 'bessel_delay': [besselap], 'bessel_mag': [besselap], 'cheby1': [cheb1ap, cheb1ord], 'chebyshev1': [cheb1ap, cheb1ord], 'chebyshevi': [cheb1ap, cheb1ord], 'cheby2': [cheb2ap, cheb2ord], 'chebyshev2': [cheb2ap, cheb2ord], 'chebyshevii': [cheb2ap, cheb2ord], } band_dict = {'band': 'bandpass', 'bandpass': 'bandpass', 'pass': 'bandpass', 'bp': 'bandpass', 'bs': 'bandstop', 'bandstop': 'bandstop', 'bands': 'bandstop', 'stop': 'bandstop', 'l': 'lowpass', 'low': 'lowpass', 'lowpass': 'lowpass', 'lp': 'lowpass', 'high': 'highpass', 'highpass': 'highpass', 'h': 'highpass', 'hp': 'highpass', } bessel_norms = {'bessel': 'phase', 'bessel_phase': 'phase', 'bessel_delay': 'delay', 'bessel_mag': 'mag'}
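# ---------------------------------------------------------------------------
# Minimal illustrative sketch (added by hand, not from the scipy source above):
# it re-derives the second-order notch coefficients from the Orfanidis formulas
# quoted in _design_notch_peak_filter (Eqs. 11.3.4, 11.3.6, 11.3.7) and checks
# them against signal.iirnotch. The values f0 = 60 Hz, Q = 30, fs = 200 Hz are
# arbitrary example numbers, not anything prescribed by the code above.
import numpy as np
from scipy import signal

fs, f0, Q = 200.0, 60.0, 30.0
w0 = 2 * np.pi * f0 / fs                  # notch frequency, rad/sample
bw = w0 / Q                               # -3 dB bandwidth, rad/sample
gb = 1 / np.sqrt(2)                       # -3 dB gain level
beta = (np.sqrt(1 - gb**2) / gb) * np.tan(bw / 2)                      # Eq. 11.3.4
gain = 1.0 / (1.0 + beta)                                              # Eq. 11.3.6
b_manual = gain * np.array([1.0, -2.0 * np.cos(w0), 1.0])              # Eq. 11.3.7
a_manual = np.array([1.0, -2.0 * gain * np.cos(w0), 2.0 * gain - 1.0])
b_ref, a_ref = signal.iirnotch(f0, Q, fs=fs)
assert np.allclose(b_manual, b_ref) and np.allclose(a_manual, a_ref)
# ---------------------------------------------------------------------------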
181,936
32.145746
105
py
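# ---------------------------------------------------------------------------
# Worked example (a hedged sketch, not part of the dataset row above): the
# order-selection helpers in the preceding file (buttord and friends) pair
# with the matching design function. Here buttord picks the minimum
# Butterworth order for an illustrative low-pass spec (passband edge 0.2,
# stopband edge 0.3 in half-cycles/sample, 3 dB loss, 40 dB attenuation),
# and freqz evaluated at the two edges confirms the filter meets the spec.
import numpy as np
from scipy import signal

N, Wn = signal.buttord(wp=0.2, ws=0.3, gpass=3, gstop=40)
b, a = signal.butter(N, Wn, btype='low')
w, h = signal.freqz(b, a, worN=np.array([0.2, 0.3]) * np.pi)
loss_pass, atten_stop = -20 * np.log10(np.abs(h))
assert loss_pass <= 3.001 and atten_stop >= 40
# ---------------------------------------------------------------------------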
scipy
scipy-main/scipy/signal/_lti_conversion.py
""" ltisys -- a collection of functions to convert linear time invariant systems from one representation to another. """ import numpy import numpy as np from numpy import (r_, eye, atleast_2d, poly, dot, asarray, prod, zeros, array, outer) from scipy import linalg from ._filter_design import tf2zpk, zpk2tf, normalize __all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete'] def tf2ss(num, den): r"""Transfer function to state-space representation. Parameters ---------- num, den : array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree. The denominator needs to be at least as long as the numerator. Returns ------- A, B, C, D : ndarray State space representation of the system, in controller canonical form. Examples -------- Convert the transfer function: .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} >>> num = [1, 3, 3] >>> den = [1, 2, 1] to the state-space representation: .. math:: \dot{\textbf{x}}(t) = \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) >>> from scipy.signal import tf2ss >>> A, B, C, D = tf2ss(num, den) >>> A array([[-2., -1.], [ 1., 0.]]) >>> B array([[ 1.], [ 0.]]) >>> C array([[ 1., 2.]]) >>> D array([[ 1.]]) """ # Controller canonical state-space representation. # if M+1 = len(num) and K+1 = len(den) then we must have M <= K # states are found by asserting that X(s) = U(s) / D(s) # then Y(s) = N(s) * X(s) # # A, B, C, and D follow quite naturally. # num, den = normalize(num, den) # Strips zeros, checks arrays nn = len(num.shape) if nn == 1: num = asarray([num], num.dtype) M = num.shape[1] K = len(den) if M > K: msg = "Improper transfer function. `num` is longer than `den`." raise ValueError(msg) if M == 0 or K == 0: # Null system return (array([], float), array([], float), array([], float), array([], float)) # pad numerator to have same number of columns has denominator num = np.hstack((np.zeros((num.shape[0], K - M), dtype=num.dtype), num)) if num.shape[-1] > 0: D = atleast_2d(num[:, 0]) else: # We don't assign it an empty array because this system # is not 'null'. It just doesn't have a non-zero D # matrix. Thus, it should have a non-zero shape so that # it can be operated on by functions like 'ss2tf' D = array([[0]], float) if K == 1: D = D.reshape(num.shape) return (zeros((1, 1)), zeros((1, D.shape[1])), zeros((D.shape[0], 1)), D) frow = -array([den[1:]]) A = r_[frow, eye(K - 2, K - 1)] B = eye(K - 1, 1) C = num[:, 1:] - outer(num[:, 0], den[1:]) D = D.reshape((C.shape[0], B.shape[1])) return A, B, C, D def _none_to_empty_2d(arg): if arg is None: return zeros((0, 0)) else: return arg def _atleast_2d_or_none(arg): if arg is not None: return atleast_2d(arg) def _shape_or_none(M): if M is not None: return M.shape else: return (None,) * 2 def _choice_not_none(*args): for arg in args: if arg is not None: return arg def _restore(M, shape): if M.shape == (0, 0): return zeros(shape) else: if M.shape != shape: raise ValueError("The input arrays have incompatible shapes.") return M def abcd_normalize(A=None, B=None, C=None, D=None): """Check state-space matrices and ensure they are 2-D. If enough information on the system is provided, that is, enough properly-shaped arrays are passed to the function, the missing ones are built from this information, ensuring the correct number of rows and columns. 
Otherwise a ValueError is raised. Parameters ---------- A, B, C, D : array_like, optional State-space matrices. All of them are None (missing) by default. See `ss2tf` for format. Returns ------- A, B, C, D : array Properly shaped state-space matrices. Raises ------ ValueError If not enough information on the system was provided. """ A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D)) MA, NA = _shape_or_none(A) MB, NB = _shape_or_none(B) MC, NC = _shape_or_none(C) MD, ND = _shape_or_none(D) p = _choice_not_none(MA, MB, NC) q = _choice_not_none(NB, ND) r = _choice_not_none(MC, MD) if p is None or q is None or r is None: raise ValueError("Not enough information on the system.") A, B, C, D = map(_none_to_empty_2d, (A, B, C, D)) A = _restore(A, (p, p)) B = _restore(B, (p, q)) C = _restore(C, (r, p)) D = _restore(D, (r, q)) return A, B, C, D def ss2tf(A, B, C, D, input=0): r"""State-space to transfer function. A, B, C, D defines a linear state-space system with `p` inputs, `q` outputs, and `n` state variables. Parameters ---------- A : array_like State (or system) matrix of shape ``(n, n)`` B : array_like Input matrix of shape ``(n, p)`` C : array_like Output matrix of shape ``(q, n)`` D : array_like Feedthrough (or feedforward) matrix of shape ``(q, p)`` input : int, optional For multiple-input systems, the index of the input to use. Returns ------- num : 2-D ndarray Numerator(s) of the resulting transfer function(s). `num` has one row for each of the system's outputs. Each row is a sequence representation of the numerator polynomial. den : 1-D ndarray Denominator of the resulting transfer function(s). `den` is a sequence representation of the denominator polynomial. Examples -------- Convert the state-space representation: .. math:: \dot{\textbf{x}}(t) = \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) + \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\ \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) + \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t) >>> A = [[-2, -1], [1, 0]] >>> B = [[1], [0]] # 2-D column vector >>> C = [[1, 2]] # 2-D row vector >>> D = 1 to the transfer function: .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} >>> from scipy.signal import ss2tf >>> ss2tf(A, B, C, D) (array([[1., 3., 3.]]), array([ 1., 2., 1.])) """ # transfer function is C (sI - A)**(-1) B + D # Check consistency and make them all rank-2 arrays A, B, C, D = abcd_normalize(A, B, C, D) nout, nin = D.shape if input >= nin: raise ValueError("System does not have the input specified.") # make SIMO from possibly MIMO system. B = B[:, input:input + 1] D = D[:, input:input + 1] try: den = poly(A) except ValueError: den = 1 if (prod(B.shape, axis=0) == 0) and (prod(C.shape, axis=0) == 0): num = numpy.ravel(D) if (prod(D.shape, axis=0) == 0) and (prod(A.shape, axis=0) == 0): den = [] return num, den num_states = A.shape[0] type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0 num = numpy.empty((nout, num_states + 1), type_test.dtype) for k in range(nout): Ck = atleast_2d(C[k, :]) num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den return num, den def zpk2ss(z, p, k): """Zero-pole-gain representation to state-space representation Parameters ---------- z, p : sequence Zeros and poles. k : float System gain. Returns ------- A, B, C, D : ndarray State space representation of the system, in controller canonical form. """ return tf2ss(*zpk2tf(z, p, k)) def ss2zpk(A, B, C, D, input=0): """State-space representation to zero-pole-gain representation. 
A, B, C, D defines a linear state-space system with `p` inputs, `q` outputs, and `n` state variables. Parameters ---------- A : array_like State (or system) matrix of shape ``(n, n)`` B : array_like Input matrix of shape ``(n, p)`` C : array_like Output matrix of shape ``(q, n)`` D : array_like Feedthrough (or feedforward) matrix of shape ``(q, p)`` input : int, optional For multiple-input systems, the index of the input to use. Returns ------- z, p : sequence Zeros and poles. k : float System gain. """ return tf2zpk(*ss2tf(A, B, C, D, input=input)) def cont2discrete(system, dt, method="zoh", alpha=None): """ Transform a continuous to a discrete state-space system. Parameters ---------- system : a tuple describing the system or an instance of `lti` The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) dt : float The discretization time step. method : str, optional Which method to use: * gbt: generalized bilinear transformation * bilinear: Tustin's approximation ("gbt" with alpha=0.5) * euler: Euler (or forward differencing) method ("gbt" with alpha=0) * backward_diff: Backwards differencing ("gbt" with alpha=1.0) * zoh: zero-order hold (default) * foh: first-order hold (*versionadded: 1.3.0*) * impulse: equivalent impulse response (*versionadded: 1.3.0*) alpha : float within [0, 1], optional The generalized bilinear transformation weighting parameter, which should only be specified with method="gbt", and is ignored otherwise Returns ------- sysd : tuple containing the discrete system Based on the input type, the output will be of the form * (num, den, dt) for transfer function input * (zeros, poles, gain, dt) for zeros-poles-gain input * (A, B, C, D, dt) for state-space system input Notes ----- By default, the routine uses a Zero-Order Hold (zoh) method to perform the transformation. Alternatively, a generalized bilinear transformation may be used, which includes the common Tustin's bilinear approximation, an Euler's method technique, or a backwards differencing technique. The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method is based on [4]_. References ---------- .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754, 2009. (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf) .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley, pp. 204-206, 1998. Examples -------- We can transform a continuous state-space system to a discrete one: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import cont2discrete, lti, dlti, dstep Define a continuous state-space system. >>> A = np.array([[0, 1],[-10., -3]]) >>> B = np.array([[0],[10.]]) >>> C = np.array([[1., 0]]) >>> D = np.array([[0.]]) >>> l_system = lti(A, B, C, D) >>> t, x = l_system.step(T=np.linspace(0, 5, 100)) >>> fig, ax = plt.subplots() >>> ax.plot(t, x, label='Continuous', linewidth=3) Transform it to a discrete state-space system using several methods. >>> dt = 0.1 >>> for method in ['zoh', 'bilinear', 'euler', 'backward_diff', 'foh', 'impulse']: ... 
d_system = cont2discrete((A, B, C, D), dt, method=method) ... s, x_d = dstep(d_system) ... ax.step(s, np.squeeze(x_d), label=method, where='post') >>> ax.axis([t[0], t[-1], x[0], 1.4]) >>> ax.legend(loc='best') >>> fig.tight_layout() >>> plt.show() """ if len(system) == 1: return system.to_discrete() if len(system) == 2: sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method, alpha=alpha) return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) elif len(system) == 3: sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt, method=method, alpha=alpha) return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,) elif len(system) == 4: a, b, c, d = system else: raise ValueError("First argument must either be a tuple of 2 (tf), " "3 (zpk), or 4 (ss) arrays.") if method == 'gbt': if alpha is None: raise ValueError("Alpha parameter must be specified for the " "generalized bilinear transform (gbt) method") elif alpha < 0 or alpha > 1: raise ValueError("Alpha parameter must be within the interval " "[0,1] for the gbt method") if method == 'gbt': # This parameter is used repeatedly - compute once here ima = np.eye(a.shape[0]) - alpha*dt*a ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a) bd = linalg.solve(ima, dt*b) # Similarly solve for the output equation matrices cd = linalg.solve(ima.transpose(), c.transpose()) cd = cd.transpose() dd = d + alpha*np.dot(c, bd) elif method == 'bilinear' or method == 'tustin': return cont2discrete(system, dt, method="gbt", alpha=0.5) elif method == 'euler' or method == 'forward_diff': return cont2discrete(system, dt, method="gbt", alpha=0.0) elif method == 'backward_diff': return cont2discrete(system, dt, method="gbt", alpha=1.0) elif method == 'zoh': # Build an exponential matrix em_upper = np.hstack((a, b)) # Need to stack zeros under the a and b matrices em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])), np.zeros((b.shape[1], b.shape[1])))) em = np.vstack((em_upper, em_lower)) ms = linalg.expm(dt * em) # Dispose of the lower rows ms = ms[:a.shape[0], :] ad = ms[:, 0:a.shape[1]] bd = ms[:, a.shape[1]:] cd = c dd = d elif method == 'foh': # Size parameters for convenience n = a.shape[0] m = b.shape[1] # Build an exponential matrix similar to 'zoh' method em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m)) em_lower = zeros((m, n + 2 * m)) em = np.block([[em_upper], [em_lower]]) ms = linalg.expm(em) # Get the three blocks from upper rows ms11 = ms[:n, 0:n] ms12 = ms[:n, n:n + m] ms13 = ms[:n, n + m:] ad = ms11 bd = ms12 - ms13 + ms11 @ ms13 cd = c dd = d + c @ ms13 elif method == 'impulse': if not np.allclose(d, 0): raise ValueError("Impulse method is only applicable" "to strictly proper systems") ad = linalg.expm(a * dt) bd = ad @ b * dt cd = c dd = c @ b * dt else: raise ValueError("Unknown transformation method '%s'" % method) return ad, bd, cd, dd, dt
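# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original scipy file.
# It checks the public scipy.signal.cont2discrete wrapper of the conversion
# code above against the closed-form zero-order-hold discretization of the
# first-order system dx/dt = -x + u, y = x.  All calls below are standard
# scipy/numpy APIs; the system matrices and dt are illustrative values only.
import numpy as np
from scipy.signal import cont2discrete

a, b = np.array([[-1.0]]), np.array([[1.0]])
c, d = np.array([[1.0]]), np.array([[0.0]])
dt = 0.1
ad, bd, cd, dd, dt_out = cont2discrete((a, b, c, d), dt, method='zoh')

# Closed form for this scalar system: ad = exp(-dt), bd = 1 - exp(-dt).
assert np.allclose(ad, np.exp(-dt))
assert np.allclose(bd, 1.0 - np.exp(-dt))
assert dt_out == dt
# ---------------------------------------------------------------------------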
16,142
29.230337
99
py
scipy
scipy-main/scipy/signal/_max_len_seq_inner.py
# Author: Eric Larson # 2014 import numpy as np #pythran export _max_len_seq_inner(int32[], int8[], int, int, int8[]) #pythran export _max_len_seq_inner(int64[], int8[], int, int, int8[]) # Fast inner loop of max_len_seq. def _max_len_seq_inner(taps, state, nbits, length, seq): # Here we compute MLS using a shift register, indexed using a ring buffer # technique (faster than using something like np.roll to shift) n_taps = taps.shape[0] idx = 0 for i in range(length): feedback = state[idx] seq[i] = feedback for ti in range(n_taps): feedback ^= state[(taps[ti] + idx) % nbits] state[idx] = feedback idx = (idx + 1) % nbits # state must be rolled s.t. next run, when idx==0, it's in the right place return np.roll(state, -idx, axis=0)
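# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original scipy file.
# The kernel above is driven by the public scipy.signal.max_len_seq.  The
# defining property of a maximum length sequence is that, after mapping the
# bits to +/-1, its periodic autocorrelation equals N at lag 0 and -1 at
# every other lag.  nbits=8 is an arbitrary illustrative choice.
import numpy as np
from scipy.signal import max_len_seq

seq, _ = max_len_seq(8)             # 2**8 - 1 = 255 samples of 0/1
x = 2.0 * seq - 1.0                 # map bits to +/-1
N = x.size

# Periodic (circular) autocorrelation computed via the FFT.
acorr = np.fft.ifft(np.abs(np.fft.fft(x)) ** 2).real

assert np.isclose(acorr[0], N)
assert np.allclose(acorr[1:], -1.0)
# ---------------------------------------------------------------------------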
821
33.25
78
py
scipy
scipy-main/scipy/signal/_short_time_fft.py
"""Implementation of an FFT-based Short-time Fourier Transform. """ # Implementation Notes for this file (as of 2023-07) # -------------------------------------------------- # * MyPy version 1.1.1 does not seem to support decorated property methods # properly. Hence, applying ``@property`` to methods decorated with `@cache`` # (as tried with the ``lower_border_end`` method) causes a mypy error when # accessing it as an index (e.g., ``SFT.lower_border_end[0]``). # * Since the method `stft` and `istft` have identical names as the legacy # functions in the signal module, referencing them as HTML link in the # docstrings has to be done by an explicit `~ShortTimeFFT.stft` instead of an # ambiguous `stft` (The ``~`` hides the class / module name). # * The HTML documentation currently renders each method/property on a separate # page without reference to the parent class. Thus, a link to `ShortTimeFFT` # was added to the "See Also" section of each method/property. These links # can be removed, when SciPy updates ``pydata-sphinx-theme`` to >= 0.13.3 # (currently 0.9). Consult Issue 18512 and PR 16660 for further details. # # Provides typing union operator ``|`` in Python 3.9: from __future__ import annotations # Linter does not allow to import ``Generator`` from ``typing`` module: from collections.abc import Generator from functools import cache, lru_cache, partial from typing import Callable, get_args, Literal, Union import numpy as np import scipy.fft as fft_lib from scipy.signal import detrend from scipy.signal.windows import get_window __all__ = ['ShortTimeFFT'] #: Allowed values for parameter `padding` of method `ShortTimeFFT.stft()`: PAD_TYPE = Literal['zeros', 'edge', 'even', 'odd'] #: Allowed values for property `ShortTimeFFT.fft_mode`: FFT_MODE_TYPE = Literal['twosided', 'centered', 'onesided', 'onesided2X'] def _calc_dual_canonical_window(win: np.ndarray, hop: int) -> np.ndarray: """Calculate canonical dual window for 1d window `win` and a time step of `hop` samples. A ``ValueError`` is raised, if the inversion fails. This is a separate function not a method, since it is also used in the class method ``ShortTimeFFT.from_dual()``. """ if hop > len(win): raise ValueError(f"{hop=} is larger than window length of {len(win)}" + " => STFT not invertible!") if issubclass(win.dtype.type, np.integer): raise ValueError("Parameter 'win' cannot be of integer type, but " + f"{win.dtype=} => STFT not invertible!") # The calculation of `relative_resolution` does not work for ints. # Furthermore, `win / DD` casts the integers away, thus an implicit # cast is avoided, which can always cause confusion when using 32-Bit # floats. w2 = win.real**2 + win.imag**2 # win*win.conj() does not ensure w2 is real DD = w2.copy() for k_ in range(hop, len(win), hop): DD[k_:] += w2[:-k_] DD[:-k_] += w2[k_:] # check DD > 0: relative_resolution = np.finfo(win.dtype).resolution * max(DD) if not np.all(DD >= relative_resolution): raise ValueError("Short-time Fourier Transform not invertible!") return win / DD # noinspection PyShadowingNames class ShortTimeFFT: r"""Provide a parametrized discrete Short-time Fourier transform (stft) and its inverse (istft). .. currentmodule:: scipy.signal.ShortTimeFFT The `~ShortTimeFFT.stft` calculates sequential FFTs by sliding a window (`win`) over an input signal by `hop` increments. It can be used to quantify the change of the spectrum over time. 
The `~ShortTimeFFT.stft` is represented by a complex-valued matrix S[q,p] where the p-th column represents an FFT with the window centered at the time t[p] = p * `delta_t` = p * `hop` * `T` where `T` is the sampling interval of the input signal. The q-th row represents the values at the frequency f[q] = q * `delta_f` with `delta_f` = 1 / (`mfft` * `T`) being the bin width of the FFT. The inverse STFT `~ShortTimeFFT.istft` is calculated by reversing the steps of the STFT: Take the IFFT of the p-th slice of S[q,p] and multiply the result with the so-called dual window (see `dual_win`). Shift the result by p * `delta_t` and add the result to previous shifted results to reconstruct the signal. If only the dual window is known and the STFT is invertible, `from_dual` can be used to instanciate this class. Due to the convention of time t = 0 being at the first sample of the input signal, the STFT values typically have negative time slots. Hence, negative indexes like `p_min` or `k_min` do not indicate counting backwards from an array's end like in standard Python indexing but being left of t = 0. More detailed information can be found in the :ref:`tutorial_stft` section of the :ref:`user_guide`. Note that all parameters of the initializer, except `scale_to` (which uses `scaling`) have identical named attributes. Parameters ---------- win : np.ndarray The window must be a real- or complex-valued 1d array. hop : int The increment in samples, by which the window is shifted in each step. fs : float Sampling frequency of input signal and window. Its relation to the sampling interval `T` is ``T = 1 / fs``. fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' Mode of FFT to be used (default 'onesided'). See property `fft_mode` for details. mfft: int | None Length of the FFT used, if a zero padded FFT is desired. If ``None`` (default), the length of the window `win` is used. dual_win : np.ndarray | None The dual window of `win`. If set to ``None``, it is calculated if needed. scale_to : 'magnitude', 'psd' | None If not ``None`` (default) the window function is scaled, so each STFT column represents either a 'magnitude' or a power spectral density ('psd') spectrum. This parameter sets the property `scaling` to the same value. See method `scale_to` for details. phase_shift : int | None If set, add a linear phase `phase_shift` / `mfft` * `f` to each frequency `f`. The default value 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). See property `phase_shift` for more details. Examples -------- The following example shows the magnitude of the STFT of a sine with varying frequency :math:`f_i(t)` (marked by a red dashed line in the plot): >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import ShortTimeFFT >>> from scipy.signal.windows import gaussian ... >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal >>> t_x = np.arange(N) * T_x # time indexes for signal >>> f_i = 1 * np.arctan((t_x - t_x[N // 2]) / 2) + 5 # varying frequency >>> x = np.sin(2*np.pi*np.cumsum(f_i)*T_x) # the signal The utilized Gaussian window is 50 samples or 2.5 s long. 
The parameter ``mfft=200`` in `ShortTimeFFT` causes the spectrum to be oversampled by a factor of 4: >>> g_std = 8 # standard deviation for Gaussian window in samples >>> w = gaussian(50, std=g_std, sym=True) # symmetric Gaussian window >>> SFT = ShortTimeFFT(w, hop=10, fs=1/T_x, mfft=200, scale_to='magnitude') >>> Sx = SFT.stft(x) # perform the STFT In the plot, the time extent of the signal `x` is marked by vertical dashed lines. Note that the SFT produces values outside the time range of `x`. The shaded areas on the left and the right indicate border effects caused by the window slices in that area not fully being inside time range of `x`: >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot >>> ax1.set_title(rf"STFT ({SFT.m_num*SFT.T:g}$\,s$ Gaussian window, " + ... rf"$\sigma_t={g_std*SFT.T}\,$s)") >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", ... xlim=(t_lo, t_hi)) ... >>> im1 = ax1.imshow(abs(Sx), origin='lower', aspect='auto', ... extent=SFT.extent(N), cmap='viridis') >>> ax1.plot(t_x, f_i, 'r--', alpha=.5, label='$f_i(t)$') >>> fig1.colorbar(im1, label="Magnitude $|S_x(t, f)|$") ... >>> # Shade areas where window slices stick out to the side: >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.2) >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line: ... ax1.axvline(t_, color='y', linestyle='--', alpha=0.5) >>> ax1.legend() >>> fig1.tight_layout() >>> plt.show() Reconstructing the signal with the `~ShortTimeFFT.istft` is straightforward, but note that the length of `x1` should be specified, since the SFT length increases in `hop` steps: >>> SFT.invertible # check if invertible True >>> x1 = SFT.istft(Sx, k1=N) >>> np.allclose(x, x1) True It is possible to calcluate the SFT of signal parts: >>> p_q = SFT.nearest_k_p(N // 2) >>> Sx0 = SFT.stft(x[:p_q]) >>> Sx1 = SFT.stft(x[p_q:]) When assembling sequential STFT parts together, the overlap needs to be considered: >>> p0_ub = SFT.upper_border_begin(p_q)[1] - SFT.p_min >>> p1_le = SFT.lower_border_end[1] - SFT.p_min >>> Sx01 = np.hstack((Sx0[:, :p0_ub], ... Sx0[:, p0_ub:] + Sx1[:, :p1_le], ... 
Sx1[:, p1_le:])) >>> np.allclose(Sx01, Sx) # Compare with SFT of complete signal True It is also possible to calculate the `itsft` for signal parts: >>> y_p = SFT.istft(Sx, N//3, N//2) >>> np.allclose(y_p, x[N//3:N//2]) True """ # immutable attributes (only have getters but no setters): _win: np.ndarray # window _dual_win: np.ndarray | None = None # canonical dual window _hop: int # Step of STFT in number of samples # mutable attributes: _fs: float # sampling frequency of input signal and window _fft_mode: FFT_MODE_TYPE = 'onesided' # Mode of FFT to use _mfft: int # length of FFT used - defaults to len(win) _scaling: Literal['magnitude', 'psd'] | None = None # Scaling of _win _phase_shift: int | None # amount to shift phase of FFT in samples # attributes for caching calculated values: _fac_mag: float | None = None _fac_psd: float | None = None _lower_border_end: tuple[int, int] | None = None def __init__(self, win: np.ndarray, hop: int, fs: float, *, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, dual_win: np.ndarray | None = None, scale_to: Literal['magnitude', 'psd'] | None = None, phase_shift: int | None = 0): if not (win.ndim == 1 and win.size > 0): raise ValueError(f"Parameter win must be 1d, but {win.shape=}!") if not all(np.isfinite(win)): raise ValueError("Parameter win must have finite entries!") if not (hop >= 1 and isinstance(hop, int)): raise ValueError(f"Parameter {hop=} is not an integer >= 1!") self._win, self._hop, self.fs = win, hop, fs self.mfft = len(win) if mfft is None else mfft if dual_win is not None: if dual_win.shape != win.shape: raise ValueError(f"{dual_win.shape=} must equal {win.shape=}!") if not all(np.isfinite(dual_win)): raise ValueError("Parameter dual_win must be a finite array!") self._dual_win = dual_win # needs to be set before scaling if scale_to is not None: # needs to be set before fft_mode self.scale_to(scale_to) self.fft_mode, self.phase_shift = fft_mode, phase_shift @classmethod def from_dual(cls, dual_win: np.ndarray, hop: int, fs: float, *, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, scale_to: Literal['magnitude', 'psd'] | None = None, phase_shift: int | None = 0): r"""Instantiate a `ShortTimeFFT` by only providing a dual window. If an STFT is invertible, it is possible to calculate the window `win` from a given dual window `dual_win`. All other parameters have the same meaning as in the initializer of `ShortTimeFFT`. As explained in the :ref:`tutorial_stft` section of the :ref:`user_guide`, an invertible STFT can be interpreted as series expansion of time-shifted and frequency modulated dual windows. E.g., the series coefficient S[q,p] belongs to the term, which shifted `dual_win` by p * `delta_t` and multiplied it by exp( 2 * j * pi * t * q * `delta_f`). Examples -------- The following example discusses decomposing a signal into time- and frequency-shifted Gaussians. A Gaussian with standard deviation of one made up of 51 samples will be used: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import ShortTimeFFT >>> from scipy.signal.windows import gaussian ... >>> T, N = 0.1, 51 >>> d_win = gaussian(N, std=1/T, sym=True) # symmetric Gaussian window >>> t = T * (np.arange(N) - N//2) ... >>> fg1, ax1 = plt.subplots() >>> ax1.set_title(r"Dual Window: Gaussian with $\sigma_t=1$") >>> ax1.set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", ... 
xlim=(t[0], t[-1]), ylim=(0, 1.1*max(d_win))) >>> ax1.plot(t, d_win, 'C0-') The following plot with the overlap of 41, 11 and 2 samples show how the `hop` interval affects the shape of the window `win`: >>> fig2, axx = plt.subplots(3, 1, sharex='all') ... >>> axx[0].set_title(r"Windows for hop$\in\{10, 40, 49\}$") >>> for c_, h_ in enumerate([10, 40, 49]): ... SFT = ShortTimeFFT.from_dual(d_win, h_, 1/T) ... axx[c_].plot(t + h_ * T, SFT.win, 'k--', alpha=.3, label=None) ... axx[c_].plot(t - h_ * T, SFT.win, 'k:', alpha=.3, label=None) ... axx[c_].plot(t, SFT.win, f'C{c_+1}', ... label=r"$\Delta t=%0.1f\,$s" % SFT.delta_t) ... axx[c_].set_ylim(0, 1.1*max(SFT.win)) ... axx[c_].legend(loc='center') >>> axx[-1].set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", ... xlim=(t[0], t[-1])) >>> plt.show() Beside the window `win` centered at t = 0 the previous (t = -`delta_t`) and following window (t = `delta_t`) are depicted. It can be seen that for small `hop` intervals, the window is compact and smooth, having a good time-frequency concentration in the STFT. For the large `hop` interval of 4.9 s, the window has small values around t = 0, which are not covered by the overlap of the adjacent windows, which could lead to numeric inaccuracies. Furthermore, the peaky shape at the beginning and the end of the window points to a higher bandwidth, resulting in a poorer time-frequency resolution of the STFT. Hence, the choice of the `hop` interval will be a compromise between a time-frequency resolution and memory requirements demanded by small `hop` sizes. See Also -------- from_window: Create instance by wrapping `get_window`. ShortTimeFFT: Create instance using standard initalizer. """ win = _calc_dual_canonical_window(dual_win, hop) return cls(win=win, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft, dual_win=dual_win, scale_to=scale_to, phase_shift=phase_shift) @classmethod def from_window(cls, win_param: Union[str, tuple, float], fs: float, nperseg: int, noverlap: int, *, symmetric_win: bool = False, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, scale_to: Literal['magnitude', 'psd'] | None = None, phase_shift: int | None = 0): """Instantiate `ShortTimeFFT` by using `get_window`. The method `get_window` is used to create a window of length `nperseg`. The parameter names `noverlap`, and `nperseg` are used here, since they more inline with other classical STFT libraries. Parameters ---------- win_param: Union[str, tuple, float], Parameters passed to `get_window`. For windows with no parameters, it may be a string (e.g., ``'hann'``), for parametrized windows a tuple, (e.g., ``('gaussian', 2.)``) or a single float specifying the shape parameter of a kaiser window (i.e. ``4.`` and ``('kaiser', 4.)`` are equal. See `get_window` for more details. fs : float Sampling frequency of input signal. Its relation to the sampling interval `T` is ``T = 1 / fs``. nperseg: int Window length in samples, which corresponds to the `m_num`. noverlap: int Window overlap in samples. It relates to the `hop` increment by ``hop = npsereg - noverlap``. symmetric_win: bool If ``True`` then a symetric window is generated, else a periodic window is generated (default). Though symmetric windows seem for most applications to be more sensible, the default of a periodic windows was chosen to correspond to the default of `get_window`. fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' Mode of FFT to be used (default 'onesided'). See property `fft_mode` for details. 
mfft: int | None Length of the FFT used, if a zero padded FFT is desired. If ``None`` (default), the length of the window `win` is used. scale_to : 'magnitude', 'psd' | None If not ``None`` (default) the window function is scaled, so each STFT column represents either a 'magnitude' or a power spectral density ('psd') spectrum. This parameter sets the property `scaling` to the same value. See method `scale_to` for details. phase_shift : int | None If set, add a linear phase `phase_shift` / `mfft` * `f` to each frequency `f`. The default value 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). See property `phase_shift` for more details. Examples -------- The following instances ``SFT0`` and ``SFT1`` are equivalent: >>> from scipy.signal import ShortTimeFFT, get_window >>> nperseg = 9 # window length >>> w = get_window(('gaussian', 2.), nperseg) >>> fs = 128 # sampling frequency >>> hop = 3 # increment of STFT time slice >>> SFT0 = ShortTimeFFT(w, hop, fs=fs) >>> SFT1 = ShortTimeFFT.from_window(('gaussian', 2.), fs, nperseg, ... noverlap=nperseg-hop) See Also -------- scipy.signal.get_window: Return a window of a given length and type. from_dual: Create instance using dual window. ShortTimeFFT: Create instance using standard initalizer. """ win = get_window(win_param, nperseg, fftbins=not symmetric_win) return cls(win, hop=nperseg-noverlap, fs=fs, fft_mode=fft_mode, mfft=mfft, scale_to=scale_to, phase_shift=phase_shift) @property def win(self) -> np.ndarray: """Window function as real- or complex-valued 1d array. This attribute is read only, since `dual_win` depends on it. See Also -------- dual_win: Canonical dual window. m_num: Number of samples in window `win`. m_num_mid: Center index of window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. hop: ime increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return self._win @property def hop(self) -> int: """Time increment in signal samples for sliding window. This attribute is read only, since `dual_win` depends on it. See Also -------- delta_t: Time increment of STFT (``hop*T``) m_num: Number of samples in window `win`. m_num_mid: Center index of window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. T: Sampling interval of input signal and of the window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return self._hop @property def T(self) -> float: """Sampling interval of input signal and of the window. A ``ValueError`` is raised if it is set to a non-positive value. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. fs: Sampling frequency (being ``1/T``) t: Times of STFT for an input signal with `n` samples. ShortTimeFFT: Class this property belongs to. """ return 1 / self._fs @T.setter def T(self, v: float): """Sampling interval of input signal and of the window. A ``ValueError`` is raised if it is set to a non-positive value. """ if not (v > 0): raise ValueError(f"Sampling interval T={v} must be positive!") self._fs = 1 / v @property def fs(self) -> float: """Sampling frequency of input signal and of the window. The sampling frequency is the inverse of the sampling interval `T`. A ``ValueError`` is raised if it is set to a non-positive value. 
See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. T: Sampling interval of input signal and of the window (``1/fs``). ShortTimeFFT: Class this property belongs to. """ return self._fs @fs.setter def fs(self, v: float): """Sampling frequency of input signal and of the window. The sampling frequency is the inverse of the sampling interval `T`. A ``ValueError`` is raised if it is set to a non-positive value. """ if not (v > 0): raise ValueError(f"Sampling frequency fs={v} must be positive!") self._fs = v @property def fft_mode(self) -> FFT_MODE_TYPE: """Mode of utilized FFT ('twosided', 'centered', 'onesided' or 'onesided2X'). It can have the following values: 'twosided': Two-sided FFT, where values for the negative frequencies are in upper half of the array. Corresponds to :func:`scipy.fft.fft()`. 'centered': Two-sided FFT with the values being ordered along monotonically increasing frequencies. Corresponds to applying :func:`scipy.fft.fftshift()` to :func:`scipy.fft.fft()`. 'onesided': Calculates only values for non-negative frequency values. Corresponds to :func:`scipy.fft.rfft()`. 'onesided2X': Like `onesided`, but the non-zero frequencies are doubled if `scaling` is set to 'magnitude' or multiplied by ``sqrt(2)`` if set to 'psd'. If `scaling` is ``None``, setting `fft_mode` to `onesided2X` is not allowed. If the FFT length `mfft` is even, the last FFT value is not paired, and thus it is not scaled. Note that the frequency values can be obtained by reading the `f` property, and the number of samples by accessing the `f_pts` property. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. f_pts: Width of the frequency bins of the STFT. onesided_fft: True if a one-sided FFT is used. scaling: Normalization applied to the window function ShortTimeFFT: Class this property belongs to. """ return self._fft_mode @fft_mode.setter def fft_mode(self, t: FFT_MODE_TYPE): """Set mode of FFT. Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. See the property `fft_mode` for more details. """ if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)): raise ValueError(f"fft_mode='{t}' not in {fft_mode_types}!") if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win): raise ValueError(f"One-sided spectra, i.e., fft_mode='{t}', " + "are not allowed for complex-valued windows!") if t == 'onesided2X' and self.scaling is None: raise ValueError(f"For scaling is None, fft_mode='{t}' is invalid!" "Do scale_to('psd') or scale_to('magnitude')!") self._fft_mode = t @property def mfft(self) -> int: """Length of input for the FFT used - may be larger than window length `m_num`. If not set, `mfft` defaults to the window length `m_num`. See Also -------- f_pts: Number of points along the frequency axis. f: Frequencies values of the STFT. m_num: Number of samples in window `win`. ShortTimeFFT: Class this property belongs to. """ return self._mfft @mfft.setter def mfft(self, n_: int): """Setter for the length of FFT utilized. See the property `mfft` for further details. """ if not (n_ >= self.m_num): raise ValueError(f"Attribute mfft={n_} needs to be at least the " + f"window length m_num={self.m_num}!") self._mfft = n_ @property def scaling(self) -> Literal['magnitude', 'psd'] | None: """Normalization applied to the window function ('magnitude', 'psd' or ``None``). If not ``None``, the FFTs can be either interpreted as a magnitude or a power spectral density spectrum. 
The window function can be scaled by calling the `scale_to()` method, or it is set by the initializer parameter `scale_to`. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. fac_psd: Scaling factor for to a power spectral density spectrum. fft_mode: Mode of utilized FFT scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. ShortTimeFFT: Class this property belongs to. """ return self._scaling def scale_to(self, scaling: Literal['magnitude', 'psd']): """Scale window to obtain 'magnitude' or 'psd' scaling for the STFT. The window of a 'magnitude' spectrum has an integral of one, i.e., unit area for non-negative windows. This ensures that absolute the values of spectrum does not change if the length of the window changes (given the input signal is stationary). To represent the power spectral density ('psd') for varying length windows the area of the absolute square of the window needs to be unity. The `scaling` property shows the current scaling. The properties `fac_magnitude` and `fac_psd` show the scaling factors required to scale the STFT values to a magnitude or a psd spectrum. This method is called, if the initializer parameter `scale_to` is set. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. fac_psd: Scaling factor for to a power spectral density spectrum. fft_mode: Mode of utilized FFT scaling: Normalization applied to the window function. ShortTimeFFT: Class this method belongs to. """ if scaling not in (scaling_values := {'magnitude', 'psd'}): raise ValueError(f"{scaling=} not in {scaling_values}!") if self._scaling == scaling: # do nothing return s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude self._win = self._win * s_fac if self._dual_win is not None: self._dual_win = self._dual_win / s_fac self._fac_mag, self._fac_psd = None, None # reset scaling factors self._scaling = scaling @property def phase_shift(self) -> int | None: """If set, add linear phase `phase_shift` / `mfft` * `f` to each FFT slice of frequency `f`. Shifting (more precisely `rolling`) an `mfft`-point FFT input by `phase_shift` samples results in a multiplication of the output by ``np.exp(2j*np.pi*q*phase_shift/mfft)`` at the frequency q * `delta_f`. The default value 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). No phase shift (``phase_shift is None``) is equivalent to ``phase_shift = -mfft//2``. In this case slices are not shifted before calculating the FFT. The absolute value of `phase_shift` is limited to be less than `mfft`. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. mfft: Length of input for the FFT used ShortTimeFFT: Class this property belongs to. """ return self._phase_shift @phase_shift.setter def phase_shift(self, v: int | None): """The absolute value of the phase shift needs to be less than mfft samples. See the `phase_shift` getter method for more details. """ if v is None: self._phase_shift = v return if not isinstance(v, int): raise ValueError(f"phase_shift={v} has the unit samples. Hence " + "it needs to be an int or it may be None!") if not (-self.mfft < v < self.mfft): raise ValueError("-mfft < phase_shift < mfft does not hold " + f"for mfft={self.mfft}, phase_shift={v}!") self._phase_shift = v def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int, padding: PAD_TYPE) -> Generator[np.ndarray, None, None]: """Generate signal slices along last axis of `x`. This method is only used by `stft_detrend`. 
The parameters are described in `~ShortTimeFFT.stft`. """ if padding not in (padding_types := get_args(PAD_TYPE)): raise ValueError(f"Parameter {padding=} not in {padding_types}!") pad_kws: dict[str, dict] = { # possible keywords to pass to np.pad: 'zeros': dict(mode='constant', constant_values=(0, 0)), 'edge': dict(mode='edge'), 'even': dict(mode='reflect', reflect_type='even'), 'odd': dict(mode='reflect', reflect_type='odd'), } # typing of pad_kws is needed to make mypy happy n, n1 = x.shape[-1], (p1 - p0) * self.hop k0 = p0 * self.hop - self.m_num_mid + k_off # start sample k1 = k0 + n1 + self.m_num # end sample i0, i1 = max(k0, 0), min(k1, n) # indexes to shorten x # dimensions for padding x: pad_width = [(0, 0)] * (x.ndim-1) + [(-min(k0, 0), max(k1 - n, 0))] x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding]) for k_ in range(0, n1, self.hop): yield x1[..., k_:k_ + self.m_num] def stft(self, x: np.ndarray, p0: int | None = None, p1: int | None = None, *, k_offset: int = 0, padding: PAD_TYPE = 'zeros', axis: int = -1) \ -> np.ndarray: """Perform the short-time Fourier transform. A two-dimensional matrix with ``p1-p0`` columns is calculated. The `f_pts` rows represent value at the frequencies `f`. The q-th column of the windowed FFT with the window `win` is centered at t[q]. The columns represent the values at the frequencies `f`. Parameters ---------- x The input signal as real or complex valued array. p0 The first element of the range of slices to calculate. If ``None`` then it is set to :attr:`p_min`, which is the smallest possible slice. p1 The end of the array. If ``None`` then `p_max(n)` is used. k_offset Index of first sample (t = 0) in `x`. padding Kind of values which are added, when the sliding window sticks out on either the lower or upper end of the input `x`. Zeros are added if the default 'zeros' is set. For 'edge' either the first or the last value of `x` is used. 'even' pads by reflecting the signal on the first or last sample and 'odd' additionally multiplies it with -1. axis The axis of `x` over which to compute the STFT. If not given, the last axis is used. Returns ------- S A complex array is returned with the dimension always being larger by one than of `x`. The last axis always represent the time slices of the STFT. `axis` defines the frequency axis (default second to last). E.g., for a one-dimensional `x`, a complex 2d array is returned, with axis 0 representing frequency and axis 1 the time slices. See Also -------- delta_f: Width of the frequency bins of the STFT. delta_t: Time increment of STFT f: Frequencies values of the STFT. invertible: Check if STFT is invertible. :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. p_range: Determine and validate slice index range. stft_detrend: STFT with detrended segments. t: Times of STFT for an input signal with `n` samples. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. """ return self.stft_detrend(x, None, p0, p1, k_offset=k_offset, padding=padding, axis=axis) def stft_detrend(self, x: np.ndarray, detr: Union[Callable[[np.ndarray], np.ndarray], Literal['linear', 'constant'], None], p0: int | None = None, p1: int | None = None, *, k_offset: int = 0, padding: PAD_TYPE = 'zeros', axis: int = -1) \ -> np.ndarray: """Short-time Fourier transform with a trend being subtracted from each segment beforehand. If `detr` is set to 'constant', the mean is subtracted, if set to "linear", the linear trend is removed. This is achieved by calling :func:`scipy.signal.detrend`. 
If `detr` is a function, `detr` is applied to each segment. All other parameters have the same meaning as in `~ShortTimeFFT.stft`. Note that due to the detrending, the original signal cannot be reconstructed by the `~ShortTimeFFT.istft`. See Also -------- invertible: Check if STFT is invertible. :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. :meth:`~ShortTimeFFT.stft`: Short-time Fourier transform (without detrending). :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. """ if isinstance(detr, str): detr = partial(detrend, type=detr) elif not (detr is None or callable(detr)): raise ValueError(f"Parameter {detr=} is not a str, function or " + "None!") n = x.shape[axis] if not (n >= (m2p := self.m_num-self.m_num_mid)): e_str = f'{len(x)=}' if x.ndim == 1 else f'of {axis=} of {x.shape}' raise ValueError(f"{e_str} must be >= ceil(m_num/2) = {m2p}!") if x.ndim > 1: # motivated by the NumPy broadcasting mechanisms: x = np.moveaxis(x, axis, -1) # determine slice index range: p0, p1 = self.p_range(n, p0, p1) S_shape_1d = (self.f_pts, p1 - p0) S_shape = x.shape[:-1] + S_shape_1d if x.ndim > 1 else S_shape_1d S = np.zeros(S_shape, dtype=complex) for p_, x_ in enumerate(self._x_slices(x, k_offset, p0, p1, padding)): if detr is not None: x_ = detr(x_) S[..., :, p_] = self._fft_func(x_ * self.win.conj()) if x.ndim > 1: return np.moveaxis(S, -2, axis if axis >= 0 else axis-1) return S def spectrogram(self, x: np.ndarray, y: np.ndarray | None = None, detr: Union[Callable[[np.ndarray], np.ndarray], Literal['linear', 'constant'], None] = None, *, p0: int | None = None, p1: int | None = None, k_offset: int = 0, padding: PAD_TYPE = 'zeros', axis: int = -1) \ -> np.ndarray: r"""Calculate spectrogram or cross-spectrogram. The spectrogram is the absolute square of the STFT, i.e, it is ``abs(S[q,p])**2`` for given ``S[q,p]`` and thus is always non-negative. For two STFTs ``Sx[q,p], Sy[q,p]``, the cross-spectrogram is defined as ``Sx[q,p] * np.conj(Sx[q,p])`` and is complex-valued. This is a convenience function for calling `~ShortTimeFFT.stft` / `stft_detrend`, hence all parameters are discussed there. If `y` is not ``None`` it needs to have the same shape as `x`. Examples -------- The following example shows the spectrogram of a square wave with varying frequency :math:`f_i(t)` (marked by a green dashed line in the plot) sampled with 20 Hz: >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.signal import square, ShortTimeFFT >>> from scipy.signal.windows import gaussian ... >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal >>> t_x = np.arange(N) * T_x # time indexes for signal >>> f_i = 5e-3*(t_x - t_x[N // 3])**2 + 1 # varying frequency >>> x = square(2*np.pi*np.cumsum(f_i)*T_x) # the signal The utitlized Gaussian window is 50 samples or 2.5 s long. The parameter ``mfft=800`` (oversampling factor 16) and the `hop` interval of 2 in `ShortTimeFFT` was chosen to produce a sufficient number of points: >>> g_std = 12 # standard deviation for Gaussian window in samples >>> win = gaussian(50, std=g_std, sym=True) # symmetric Gaussian wind. >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T_x, mfft=800, scale_to='psd') >>> Sx2 = SFT.spectrogram(x) # calculate absolute square of STFT The plot's colormap is logarithmically scaled as the power spectral density is in dB. 
The time extent of the signal `x` is marked by vertical dashed lines and the shaded areas mark the presence of border effects: >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot >>> ax1.set_title(rf"Spectrogram ({SFT.m_num*SFT.T:g}$\,s$ Gaussian " + ... rf"window, $\sigma_t={g_std*SFT.T:g}\,$s)") >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", ... xlim=(t_lo, t_hi)) >>> Sx_dB = 10 * np.log10(np.fmax(Sx2, 1e-4)) # limit range to -40 dB >>> im1 = ax1.imshow(Sx_dB, origin='lower', aspect='auto', ... extent=SFT.extent(N), cmap='magma') >>> ax1.plot(t_x, f_i, 'g--', alpha=.5, label='$f_i(t)$') >>> fig1.colorbar(im1, label='Power Spectral Density ' + ... r"$20\,\log_{10}|S_x(t, f)|$ in dB") ... >>> # Shade areas where window slices stick out to the side: >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.3) >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line ... ax1.axvline(t_, color='c', linestyle='--', alpha=0.5) >>> ax1.legend() >>> fig1.tight_layout() >>> plt.show() The logarithmic scaling reveals the odd harmonics of the square wave, which are reflected at the Nyquist frequency of 10 Hz. This aliasing is also the main source of the noise artifacts in the plot. See Also -------- :meth:`~ShortTimeFFT.stft`: Perform the short-time Fourier transform. stft_detrend: STFT with a trend subtracted from each segment. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. """ Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset, padding=padding, axis=axis) if y is None or y is x: # do spectrogram: return Sx.real**2 + Sx.imag**2 # Cross-spectrogram: Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset, padding=padding, axis=axis) return Sx * Sy.conj() @property def dual_win(self) -> np.ndarray: """Canonical dual window. A STFT can be interpreted as the input signal being expressed as a weighted sum of modulated and time-shifted dual windows. Note that for a given window there exist many dual windows. The canonical window is the one with the minimal energy (i.e., :math:`L_2` norm). `dual_win` has same length as `win`, namely `m_num` samples. If the dual window cannot be calculated a ``ValueError`` is raised. This attribute is read only and calculated lazily. See Also -------- dual_win: Canonical dual window. m_num: Number of samples in window `win`. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ if self._dual_win is None: self._dual_win = _calc_dual_canonical_window(self.win, self.hop) return self._dual_win @property def invertible(self) -> bool: """Check if STFT is invertible. This is achieved by trying to calculate the canonical dual window. See Also -------- :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. m_num: Number of samples in window `win` and `dual_win`. dual_win: Canonical dual window. win: Window for STFT. ShortTimeFFT: Class this property belongs to. """ try: return len(self.dual_win) > 0 # call self.dual_win() except ValueError: return False def istft(self, S: np.ndarray, k0: int = 0, k1: int | None = None, *, f_axis: int = -2, t_axis: int = -1) \ -> np.ndarray: """Inverse short-time Fourier transform. 
It returns an array of dimension ``S.ndim - 1`` which is real if `onesided_fft` is set, else complex. If the STFT is not `invertible`, or the parameters are out of bounds a ``ValueError`` is raised. Parameters ---------- S A complex valued array where `f_axis` denotes the frequency values and the `t-axis` dimension the temporal values of the STFT values. k0, k1 The start and the end index of the reconstructed signal. The default (``k0 = 0``, ``k1 = None``) assumes that the maximum length signal should be reconstructed. f_axis, t_axis The axes in `S` denoting the frequency and the time dimension. Notes ----- It is required that `S` has `f_pts` entries along the `f_axis`. For the `t_axis` it is assumed that the first entry corresponds to `p_min` * `delta_t` (being <= 0). The length of `t_axis` needs to be compatible with `k1`. I.e., ``S.shape[t_axis] >= self.p_max(k1)`` must hold, if `k1` is not ``None``. Else `k1` is set to `k_max` with:: q_max = S.shape[t_range] + self.p_min k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid The :ref:`tutorial_stft` section of the :ref:`user_guide` discussed the slicing behavior by means of an example. See Also -------- invertible: Check if STFT is invertible. :meth:`~ShortTimeFFT.stft`: Perform Short-time Fourier transform. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. """ if f_axis == t_axis: raise ValueError(f"{f_axis=} may not be equal to {t_axis=}!") if S.shape[f_axis] != self.f_pts: raise ValueError(f"{S.shape[f_axis]=} must be equal to " + f"{self.f_pts=} ({S.shape=})!") n_min = self.m_num-self.m_num_mid # minimum signal length if not (S.shape[t_axis] >= (q_num := self.p_num(n_min))): raise ValueError(f"{S.shape[t_axis]=} needs to have at least " + f"{q_num} slices ({S.shape=})!") if t_axis != S.ndim - 1 or f_axis != S.ndim - 2: t_axis = S.ndim + t_axis if t_axis < 0 else t_axis f_axis = S.ndim + f_axis if f_axis < 0 else f_axis S = np.moveaxis(S, (f_axis, t_axis), (-2, -1)) q_max = S.shape[-1] + self.p_min k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid k1 = k_max if k1 is None else k1 if not (self.k_min <= k0 < k1 <= k_max): raise ValueError(f"({self.k_min=}) <= ({k0=}) < ({k1=}) <= " + f"({k_max=}) is false!") if not (num_pts := k1 - k0) >= n_min: raise ValueError(f"({k1=}) - ({k0=}) = {num_pts} has to be at " + f"least the half the window length {n_min}!") q0 = (k0 // self.hop + self.p_min if k0 >= 0 else # p_min always <= 0 k0 // self.hop) q1 = min(self.p_max(k1), q_max) k_q0, k_q1 = self.nearest_k_p(k0), self.nearest_k_p(k1, left=False) n_pts = k_q1 - k_q0 + self.m_num - self.m_num_mid x = np.zeros(S.shape[:-2] + (n_pts,), dtype=float if self.onesided_fft else complex) for q_ in range(q0, q1): xs = self._ifft_func(S[..., :, q_ - self.p_min]) * self.dual_win i0 = q_ * self.hop - self.m_num_mid i1 = min(i0 + self.m_num, n_pts+k0) j0, j1 = 0, i1 - i0 if i0 < k0: # xs sticks out to the left on x: j0 += k0 - i0 i0 = k0 x[..., i0-k0:i1-k0] += xs[..., j0:j1] x = x[..., :k1-k0] if x.ndim > 1: x = np.moveaxis(x, -1, f_axis if f_axis < x.ndim else t_axis) return x @property def fac_magnitude(self) -> float: """Factor to multiply the STFT values by to scale each frequency slice to a magnitude spectrum. It is 1 if attribute ``scaling == 'magnitude'``. The window can be scaled to a magnitude spectrum by using the method `scale_to`. See Also -------- fac_psd: Scaling factor for to a power spectral density spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. 
scaling: Normalization applied to the window function. ShortTimeFFT: Class this property belongs to. """ if self.scaling == 'magnitude': return 1 if self._fac_mag is None: self._fac_mag = 1 / abs(sum(self.win)) return self._fac_mag @property def fac_psd(self) -> float: """Factor to multiply the STFT values by to scale each frequency slice to a power spectral density (PSD). It is 1 if attribute ``scaling == 'psd'``. The window can be scaled to a psd spectrum by using the method `scale_to`. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. scaling: Normalization applied to the window function. ShortTimeFFT: Class this property belongs to. """ if self.scaling == 'psd': return 1 if self._fac_psd is None: self._fac_psd = 1 / np.sqrt( sum(self.win.real**2+self.win.imag**2) / self.T) return self._fac_psd @property def m_num(self) -> int: """Number of samples in window `win`. Note that the FFT can be oversampled by zero-padding. This is achieved by setting the `mfft` property. See Also -------- m_num_mid: Center index of window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. hop: Time increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return len(self.win) @property def m_num_mid(self) -> int: """Center index of window `win`. For odd `m_num`, ``(m_num - 1) / 2`` is returned and for even `m_num` (per definition) ``m_num / 2`` is returned. See Also -------- m_num: Number of samples in window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. hop: ime increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return self.m_num // 2 @cache def _pre_padding(self) -> tuple[int, int]: """Smallest signal index and slice index due to padding. Since, per convention, for time t=0, n,q is zero, the returned values are negative or zero. """ w2 = self.win.real**2 + self.win.imag**2 # move window to the left until the overlap with t >= 0 vanishes: n0 = -self.m_num_mid for q_, n_ in enumerate(range(n0, n0-self.m_num-1, -self.hop)): n_next = n_ - self.hop if n_next + self.m_num <= 0 or all(w2[n_next:] == 0): return n_, -q_ raise RuntimeError("This is code line should not have been reached!") # If this case is reached, it probably means the first slice should be # returned, i.e.: return n0, 0 @property def k_min(self) -> int: """The smallest possible signal index of the STFT. `k_min` is the index of the left-most non-zero value of the lowest slice `p_min`. Since the zeroth slice is centered over the zeroth sample of the input signal, `k_min` is never positive. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to. """ return self._pre_padding()[0] @property def p_min(self) -> int: """The smallest possible slice index. 
`p_min` is the index of the left-most slice, where the window still sticks into the signal, i.e., has non-zero part for t >= 0. `k_min` is the smallest index where the window function of the slice `p_min` is non-zero. Since, per convention the zeroth slice is centered at t=0, `p_min` <= 0 always holds. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this property belongs to. """ return self._pre_padding()[1] @lru_cache(maxsize=256) def _post_padding(self, n: int) -> tuple[int, int]: """Largest signal index and slice index due to padding.""" w2 = self.win.real**2 + self.win.imag**2 # move window to the right until the overlap for t < t[n] vanishes: q1 = n // self.hop # last slice index with t[p1] <= t[n] k1 = q1 * self.hop - self.m_num_mid for q_, k_ in enumerate(range(k1, n+self.m_num, self.hop), start=q1): n_next = k_ + self.hop if n_next >= n or all(w2[:n-n_next] == 0): return k_ + self.m_num, q_ + 1 raise RuntimeError("This is code line should not have been reached!") # If this case is reached, it probably means the last slice should be # returned, i.e.: return k1 + self.m_num - self.m_num_mid, q1 + 1 def k_max(self, n: int) -> int: """First sample index after signal end not touched by a time slice. `k_max` - 1 is the largest sample index of the slice `p_max` for a given input signal of `n` samples. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to. """ return self._post_padding(n)[0] def p_max(self, n: int) -> int: """Index of first non-overlapping upper time slice for `n` sample input. Note that center point t[p_max] = (p_max(n)-1) * `delta_t` is typically larger than last time index t[n-1] == (`n`-1) * `T`. The upper border of samples indexes covered by the window slices is given by `k_max`. Furthermore, `p_max` does not denote the number of slices `p_num` since `p_min` is typically less than zero. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_min: The smallest possible slice index. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to. """ return self._post_padding(n)[1] def p_num(self, n: int) -> int: """Number of time slices for an input signal with `n` samples. It is given by `p_num` = `p_max` - `p_min` with `p_min` typically being negative. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. 
p_max: Index of first non-overlapping upper time slice. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this method belongs to. """ return self.p_max(n) - self.p_min @property def lower_border_end(self) -> tuple[int, int]: """First signal index and first slice index unaffected by pre-padding. Describes the point where the window does not stick out to the left of the signal domain. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to. """ # not using @cache decorator due to MyPy limitations if self._lower_border_end is not None: return self._lower_border_end # first non-zero element in self.win: m0 = np.flatnonzero(self.win.real**2 + self.win.imag**2)[0] # move window to the right until does not stick out to the left: k0 = -self.m_num_mid + m0 for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)): if k_ + self.hop >= 0: # next entry does not stick out anymore self._lower_border_end = (k_ + self.m_num, q_ + 1) return self._lower_border_end self._lower_border_end = (0, max(self.p_min, 0)) # ends at first slice return self._lower_border_end @lru_cache(maxsize=256) def upper_border_begin(self, n: int) -> tuple[int, int]: """First signal index and first slice index affected by post-padding. Describes the point where the window does begin stick out to the right of the signal domain. A detailed example is given :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to. """ w2 = self.win.real**2 + self.win.imag**2 q2 = n // self.hop + 1 # first t[q] >= t[n] q1 = max((n-self.m_num) // self.hop - 1, -1) # move window left until does not stick out to the right: for q_ in range(q2, q1, -1): k_ = q_ * self.hop + (self.m_num - self.m_num_mid) if k_ < n or all(w2[n-k_:] == 0): return (q_ + 1) * self.hop - self.m_num_mid, q_ + 1 return 0, 0 # border starts at first slice @property def delta_t(self) -> float: """Time increment of STFT. The time increment `delta_t` = `T` * `hop` represents the sample increment `hop` converted to time based on the sampling interval `T`. See Also -------- delta_f: Width of the frequency bins of the STFT. hop: Hop size in signal samples for sliding window. t: Times of STFT for an input signal with `n` samples. T: Sampling interval of input signal and window `win`. ShortTimeFFT: Class this property belongs to """ return self.T * self.hop def p_range(self, n: int, p0: int | None = None, p1: int | None = None) -> tuple[int, int]: """Determine and validate slice index range. 
Parameters ---------- n : int Number of samples of input signal, assuming t[0] = 0. p0 : int | None First slice index. If 0 then the first slice is centered at t = 0. If ``None`` then `p_min` is used. Note that p0 may be < 0 if slices are left of t = 0. p1 : int | None End of interval (last value is p1-1). If ``None`` then `p_max(n)` is used. Returns ------- p0_ : int The first slice index. p1_ : int End of interval (last value is p1-1). Notes ----- A ``ValueError`` is raised if ``p_min <= p0 < p1 <= p_max(n)`` does not hold. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this method belongs to. """ p_max = self.p_max(n) # shorthand p0_ = self.p_min if p0 is None else p0 p1_ = p_max if p1 is None else p1 if not (self.p_min <= p0_ < p1_ <= p_max): raise ValueError(f"Invalid Parameter {p0=}, {p1=}, i.e., " + f"{self.p_min=} <= p0 < p1 <= {p_max=} " + f"does not hold for signal length {n=}!") return p0_, p1_ @lru_cache(maxsize=1) def t(self, n: int, p0: int | None = None, p1: int | None = None, k_offset: int = 0) -> np.ndarray: """Times of STFT for an input signal with `n` samples. Returns a 1d array with times of the `~ShortTimeFFT.stft` values with the same parametrization. Note that the slices are ``delta_t = hop * T`` time units apart. Parameters ---------- n Number of samples of the input signal. p0 The first element of the range of slices to calculate. If ``None`` then it is set to :attr:`p_min`, which is the smallest possible slice. p1 The end of the array. If ``None`` then `p_max(n)` is used. k_offset Index of the first sample (t = 0) of the input signal. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. nearest_k_p: Nearest sample index k_p for which t[k_p] == t[p] holds. T: Sampling interval of input signal and of the window (``1/fs``). fs: Sampling frequency (being ``1/T``) ShortTimeFFT: Class this method belongs to. """ p0, p1 = self.p_range(n, p0, p1) return np.arange(p0, p1) * self.delta_t + k_offset * self.T def nearest_k_p(self, k: int, left: bool = True) -> int: """Return nearest sample index k_p for which t[k_p] == t[p] holds. The nearest next smaller time sample p (where t[p] is the center position of the window of the p-th slice) is p_k = k // `hop`. If `hop` is a divisor of `k` then `k` is returned. If `left` is set then p_k * `hop` is returned, else (p_k+1) * `hop`. This method can be used to slice an input signal into chunks for calculating the STFT and iSTFT incrementally. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. T: Sampling interval of input signal and of the window (``1/fs``). fs: Sampling frequency (being ``1/T``) t: Times of STFT for an input signal with `n` samples. ShortTimeFFT: Class this method belongs to. """ p_q, remainder = divmod(k, self.hop) if remainder == 0: return k return p_q * self.hop if left else (p_q + 1) * self.hop @property def delta_f(self) -> float: """Width of the frequency bins of the STFT. Return the frequency interval `delta_f` = 1 / (`mfft` * `T`). See Also -------- delta_t: Time increment of STFT. 
f_pts: Number of points along the frequency axis. f: Frequencies values of the STFT. mfft: Length of the input for FFT used. T: Sampling interval. t: Times of STFT for an input signal with `n` samples. ShortTimeFFT: Class this property belongs to. """ return 1 / (self.mfft * self.T) @property def f_pts(self) -> int: """Number of points along the frequency axis. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to. """ return self.mfft // 2 + 1 if self.onesided_fft else self.mfft @property def onesided_fft(self) -> bool: """Return True if a one-sided FFT is used. Returns ``True`` if `fft_mode` is either 'onesided' or 'onesided2X'. See Also -------- fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or 'onesided2X') ShortTimeFFT: Class this property belongs to. """ return self.fft_mode in {'onesided', 'onesided2X'} @property def f(self) -> np.ndarray: """Frequencies values of the STFT. A 1d array of length `f_pts` with `delta_f` spaced entries is returned. See Also -------- delta_f: Width of the frequency bins of the STFT. f_pts: Number of points along the frequency axis. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to. """ if self.fft_mode in {'onesided', 'onesided2X'}: return fft_lib.rfftfreq(self.mfft, self.T) elif self.fft_mode == 'twosided': return fft_lib.fftfreq(self.mfft, self.T) elif self.fft_mode == 'centered': return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T)) # This should never happen but makes the Linters happy: fft_modes = get_args(FFT_MODE_TYPE) raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") def _fft_func(self, x: np.ndarray) -> np.ndarray: """FFT based on the `fft_mode`, `mfft`, `scaling` and `phase_shift` attributes. For multidimensional arrays the transformation is carried out on the last axis. """ if self.phase_shift is not None: if x.shape[-1] < self.mfft: # zero pad if needed z_shape = list(x.shape) z_shape[-1] = self.mfft - x.shape[-1] x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype))) p_s = (self.phase_shift + self.m_num_mid) % self.m_num x = np.roll(x, -p_s, axis=-1) if self.fft_mode == 'twosided': return fft_lib.fft(x, n=self.mfft, axis=-1) if self.fft_mode == 'centered': return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1)) if self.fft_mode == 'onesided': return fft_lib.rfft(x, n=self.mfft, axis=-1) if self.fft_mode == 'onesided2X': X = fft_lib.rfft(x, n=self.mfft, axis=-1) # Either squared magnitude (psd) or magnitude is doubled: fac = np.sqrt(2) if self.scaling == 'psd' else 2 # For even input length, the last entry is unpaired: X[..., 1: -1 if self.mfft % 2 == 0 else None] *= fac return X # This should never happen but makes the Linter happy: fft_modes = get_args(FFT_MODE_TYPE) raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") def _ifft_func(self, X: np.ndarray) -> np.ndarray: """Inverse to `_fft_func`. Returned is an array of length `m_num`. If the FFT is `onesided` then a float array is returned else a complex array is returned. For multidimensional arrays the transformation is carried out on the last axis. 
""" if self.fft_mode == 'twosided': x = fft_lib.ifft(X, n=self.mfft, axis=-1) elif self.fft_mode == 'centered': x = fft_lib.ifft(fft_lib.ifftshift(X), n=self.mfft, axis=-1) elif self.fft_mode == 'onesided': x = fft_lib.irfft(X, n=self.mfft, axis=-1) elif self.fft_mode == 'onesided2X': Xc = X.copy() # we do not want to modify function parameters fac = np.sqrt(2) if self.scaling == 'psd' else 2 # For even length X the last value is not paired with a negative # value on the two-sided FFT: q1 = -1 if self.mfft % 2 == 0 else None Xc[..., 1:q1] /= fac x = fft_lib.irfft(Xc, n=self.mfft, axis=-1) else: # This should never happen but makes the Linter happy: error_str = f"{self.fft_mode=} not in {get_args(FFT_MODE_TYPE)}!" raise RuntimeError(error_str) if self.phase_shift is None: return x[:self.m_num] p_s = (self.phase_shift + self.m_num_mid) % self.m_num return np.roll(x, p_s, axis=-1)[:self.m_num] def extent(self, n: int, axes_seq: Literal['tf', 'ft'] = 'tf', center_bins: bool = False) -> tuple[float, float, float, float]: """Return minimum and maximum values time-frequency values. A tuple with four floats ``(t0, t1, f0, f1)`` for 'tf' and ``(f0, f1, t0, t1)`` for 'ft') is returned describing the corners of the time-frequency domain of the `~ShortTimeFFT.stft`. That tuple can be passed to `matplotlib.pyplot.imshow` as a parameter with the same name. Parameters ---------- n : int Number of samples in input signal. axes_seq : {'tf', 'ft'} Return time extent first and then frequency extent or vice-versa. center_bins: bool If set (default ``False``), the values of the time slots and frequency bins are moved from the side the middle. This is useful, when plotting the `~ShortTimeFFT.stft` values as step functions, i.e., with no interpolation. See Also -------- :func:`matplotlib.pyplot.imshow`: Display data as an image. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. """ if axes_seq not in ('tf', 'ft'): raise ValueError(f"Parameter {axes_seq=} not in ['tf', 'ft']!") if self.onesided_fft: q0, q1 = 0, self.f_pts elif self.fft_mode == 'centered': q0 = -self.mfft // 2 q1 = self.mfft // 2 - 1 if self.mfft % 2 == 0 else self.mfft // 2 else: raise ValueError(f"Attribute fft_mode={self.fft_mode} must be " + "in ['centered', 'onesided', 'onesided2X']") p0, p1 = self.p_min, self.p_max(n) # shorthand if center_bins: t0, t1 = self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5) f0, f1 = self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5) else: t0, t1 = self.delta_t * p0, self.delta_t * p1 f0, f1 = self.delta_f * q0, self.delta_f * q1 return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1)
73,105
42.671446
79
py
scipy
scipy-main/scipy/signal/_waveforms.py
# Author: Travis Oliphant # 2003 # # Feb. 2010: Updated by Warren Weckesser: # Rewrote much of chirp() # Added sweep_poly() import numpy as np from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ exp, cos, sin, polyval, polyint __all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', 'unit_impulse'] def sawtooth(t, width=1): """ Return a periodic sawtooth or triangle waveform. The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval ``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like Time. width : array_like, optional Width of the rising ramp as a proportion of the total cycle. Default is 1, producing a rising ramp, while 0 produces a falling ramp. `width` = 0.5 produces a triangle wave. If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the sawtooth waveform. Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500) >>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t)) """ t, w = asarray(t), asarray(width) w = asarray(w + (t - t)) t = asarray(t + (w - w)) if t.dtype.char in ['fFdD']: ytype = t.dtype.char else: ytype = 'd' y = zeros(t.shape, ytype) # width must be between 0 and 1 inclusive mask1 = (w > 1) | (w < 0) place(y, mask1, nan) # take t modulo 2*pi tmod = mod(t, 2 * pi) # on the interval 0 to width*2*pi function is # tmod / (pi*w) - 1 mask2 = (1 - mask1) & (tmod < w * 2 * pi) tsub = extract(mask2, tmod) wsub = extract(mask2, w) place(y, mask2, tsub / (pi * wsub) - 1) # on the interval width*2*pi to 2*pi function is # (pi*(w+1)-tmod) / (pi*(1-w)) mask3 = (1 - mask1) & (1 - mask2) tsub = extract(mask3, tmod) wsub = extract(mask3, w) place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub))) return y def square(t, duty=0.5): """ Return a periodic square-wave waveform. The square wave has a period ``2*pi``, has value +1 from 0 to ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in the interval [0,1]. Note that this is not band-limited. It produces an infinite number of harmonics, which are aliased back and forth across the frequency spectrum. Parameters ---------- t : array_like The input time array. duty : array_like, optional Duty cycle. Default is 0.5 (50% duty cycle). If an array, causes wave shape to change over time, and must be the same length as t. Returns ------- y : ndarray Output array containing the square waveform. 
Examples -------- A 5 Hz waveform sampled at 500 Hz for 1 second: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(0, 1, 500, endpoint=False) >>> plt.plot(t, signal.square(2 * np.pi * 5 * t)) >>> plt.ylim(-2, 2) A pulse-width modulated sine wave: >>> plt.figure() >>> sig = np.sin(2 * np.pi * t) >>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2) >>> plt.subplot(2, 1, 1) >>> plt.plot(t, sig) >>> plt.subplot(2, 1, 2) >>> plt.plot(t, pwm) >>> plt.ylim(-1.5, 1.5) """ t, w = asarray(t), asarray(duty) w = asarray(w + (t - t)) t = asarray(t + (w - w)) if t.dtype.char in ['fFdD']: ytype = t.dtype.char else: ytype = 'd' y = zeros(t.shape, ytype) # width must be between 0 and 1 inclusive mask1 = (w > 1) | (w < 0) place(y, mask1, nan) # on the interval 0 to duty*2*pi function is 1 tmod = mod(t, 2 * pi) mask2 = (1 - mask1) & (tmod < w * 2 * pi) place(y, mask2, 1) # on the interval duty*2*pi to 2*pi function is # (pi*(w+1)-tmod) / (pi*(1-w)) mask3 = (1 - mask1) & (1 - mask2) place(y, mask3, -1) return y def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False): """ Return a Gaussian modulated sinusoid: ``exp(-a t^2) exp(1j*2*pi*fc*t).`` If `retquad` is True, then return the real and imaginary parts (in-phase and quadrature). If `retenv` is True, then return the envelope (unmodulated signal). Otherwise, return the real part of the modulated sinusoid. Parameters ---------- t : ndarray or the string 'cutoff' Input array. fc : float, optional Center frequency (e.g. Hz). Default is 1000. bw : float, optional Fractional bandwidth in frequency domain of pulse (e.g. Hz). Default is 0.5. bwr : float, optional Reference level at which fractional bandwidth is calculated (dB). Default is -6. tpr : float, optional If `t` is 'cutoff', then the function returns the cutoff time for when the pulse amplitude falls below `tpr` (in dB). Default is -60. retquad : bool, optional If True, return the quadrature (imaginary) as well as the real part of the signal. Default is False. retenv : bool, optional If True, return the envelope of the signal. Default is False. Returns ------- yI : ndarray Real part of signal. Always returned. yQ : ndarray Imaginary part of signal. Only returned if `retquad` is True. yenv : ndarray Envelope of signal. Only returned if `retenv` is True. See Also -------- scipy.signal.morlet Examples -------- Plot real component, imaginary component, and envelope for a 5 Hz pulse, sampled at 100 Hz for 2 seconds: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 2 * 100, endpoint=False) >>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True) >>> plt.plot(t, i, t, q, t, e, '--') """ if fc < 0: raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc) if bw <= 0: raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." 
% bw) if bwr >= 0: raise ValueError("Reference level for bandwidth (bwr=%.2f) must " "be < 0 dB" % bwr) # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f) ref = pow(10.0, bwr / 20.0) # fdel = fc*bw/2: g(fdel) = ref --- solve this for a # # pi^2/a * fc^2 * bw^2 /4=-log(ref) a = -(pi * fc * bw) ** 2 / (4.0 * log(ref)) if isinstance(t, str): if t == 'cutoff': # compute cut_off point # Solve exp(-a tc**2) = tref for tc # tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20) if tpr >= 0: raise ValueError("Reference level for time cutoff must " "be < 0 dB") tref = pow(10.0, tpr / 20.0) return sqrt(-log(tref) / a) else: raise ValueError("If `t` is a string, it must be 'cutoff'") yenv = exp(-a * t * t) yI = yenv * cos(2 * pi * fc * t) yQ = yenv * sin(2 * pi * fc * t) if not retquad and not retenv: return yI if not retquad and retenv: return yI, yenv if retquad and not retenv: return yI, yQ if retquad and retenv: return yI, yQ, yenv def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True): """Frequency-swept cosine generator. In the following, 'Hz' should be interpreted as 'cycles per unit'; there is no requirement here that the unit is one second. The important distinction is that the units of rotation are cycles, not radians. Likewise, `t` could be a measurement of space instead of time. Parameters ---------- t : array_like Times at which to evaluate the waveform. f0 : float Frequency (e.g. Hz) at time t=0. t1 : float Time at which `f1` is specified. f1 : float Frequency (e.g. Hz) of the waveform at time `t1`. method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional Kind of frequency sweep. If not given, `linear` is assumed. See Notes below for more details. phi : float, optional Phase offset, in degrees. Default is 0. vertex_zero : bool, optional This parameter is only used when `method` is 'quadratic'. It determines whether the vertex of the parabola that is the graph of the frequency is at t=0 or t=t1. Returns ------- y : ndarray A numpy array containing the signal evaluated at `t` with the requested time-varying frequency. More precisely, the function returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral (from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below. See Also -------- sweep_poly Notes ----- There are four options for the `method`. The following formulas give the instantaneous frequency (in Hz) of the signal generated by `chirp()`. For convenience, the shorter names shown below may also be used. linear, lin, li: ``f(t) = f0 + (f1 - f0) * t / t1`` quadratic, quad, q: The graph of the frequency f(t) is a parabola through (0, f0) and (t1, f1). By default, the vertex of the parabola is at (0, f0). If `vertex_zero` is False, then the vertex is at (t1, f1). The formula is: if vertex_zero is True: ``f(t) = f0 + (f1 - f0) * t**2 / t1**2`` else: ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2`` To use a more general quadratic function, or an arbitrary polynomial, use the function `scipy.signal.sweep_poly`. logarithmic, log, lo: ``f(t) = f0 * (f1/f0)**(t/t1)`` f0 and f1 must be nonzero and have the same sign. This signal is also known as a geometric or exponential chirp. hyperbolic, hyp: ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)`` f0 and f1 must be nonzero. 
Examples -------- The following will be used in the examples: >>> import numpy as np >>> from scipy.signal import chirp, spectrogram >>> import matplotlib.pyplot as plt For the first example, we'll plot the waveform for a linear chirp from 6 Hz to 1 Hz over 10 seconds: >>> t = np.linspace(0, 10, 1500) >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear') >>> plt.plot(t, w) >>> plt.title("Linear Chirp, f(0)=6, f(10)=1") >>> plt.xlabel('t (sec)') >>> plt.show() For the remaining examples, we'll use higher frequency ranges, and demonstrate the result using `scipy.signal.spectrogram`. We'll use a 4 second interval sampled at 7200 Hz. >>> fs = 7200 >>> T = 4 >>> t = np.arange(0, int(T*fs)) / fs We'll use this function to plot the spectrogram in each example. >>> def plot_spectrogram(title, w, fs): ... ff, tt, Sxx = spectrogram(w, fs=fs, nperseg=256, nfft=576) ... fig, ax = plt.subplots() ... ax.pcolormesh(tt, ff[:145], Sxx[:145], cmap='gray_r', ... shading='gouraud') ... ax.set_title(title) ... ax.set_xlabel('t (sec)') ... ax.set_ylabel('Frequency (Hz)') ... ax.grid(True) ... Quadratic chirp from 1500 Hz to 250 Hz (vertex of the parabolic curve of the frequency is at t=0): >>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic') >>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250', w, fs) >>> plt.show() Quadratic chirp from 1500 Hz to 250 Hz (vertex of the parabolic curve of the frequency is at t=T): >>> w = chirp(t, f0=1500, f1=250, t1=T, method='quadratic', ... vertex_zero=False) >>> plot_spectrogram(f'Quadratic Chirp, f(0)=1500, f({T})=250\\n' + ... '(vertex_zero=False)', w, fs) >>> plt.show() Logarithmic chirp from 1500 Hz to 250 Hz: >>> w = chirp(t, f0=1500, f1=250, t1=T, method='logarithmic') >>> plot_spectrogram(f'Logarithmic Chirp, f(0)=1500, f({T})=250', w, fs) >>> plt.show() Hyperbolic chirp from 1500 Hz to 250 Hz: >>> w = chirp(t, f0=1500, f1=250, t1=T, method='hyperbolic') >>> plot_spectrogram(f'Hyperbolic Chirp, f(0)=1500, f({T})=250', w, fs) >>> plt.show() """ # 'phase' is computed in _chirp_phase, to make testing easier. phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero) # Convert phi to radians. phi *= pi / 180 return cos(phase + phi) def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True): """ Calculate the phase used by `chirp` to generate its output. See `chirp` for a description of the arguments. """ t = asarray(t) f0 = float(f0) t1 = float(t1) f1 = float(f1) if method in ['linear', 'lin', 'li']: beta = (f1 - f0) / t1 phase = 2 * pi * (f0 * t + 0.5 * beta * t * t) elif method in ['quadratic', 'quad', 'q']: beta = (f1 - f0) / (t1 ** 2) if vertex_zero: phase = 2 * pi * (f0 * t + beta * t ** 3 / 3) else: phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3) elif method in ['logarithmic', 'log', 'lo']: if f0 * f1 <= 0.0: raise ValueError("For a logarithmic chirp, f0 and f1 must be " "nonzero and have the same sign.") if f0 == f1: phase = 2 * pi * f0 * t else: beta = t1 / log(f1 / f0) phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0) elif method in ['hyperbolic', 'hyp']: if f0 == 0 or f1 == 0: raise ValueError("For a hyperbolic chirp, f0 and f1 must be " "nonzero.") if f0 == f1: # Degenerate case: constant frequency. phase = 2 * pi * f0 * t else: # Singular point: the instantaneous frequency blows up # when t == sing. sing = -f1 * t1 / (f0 - f1) phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing)) else: raise ValueError("method must be 'linear', 'quadratic', 'logarithmic'," " or 'hyperbolic', but a value of %r was given." 
% method) return phase def sweep_poly(t, poly, phi=0): """ Frequency-swept cosine generator, with a time-dependent frequency. This function generates a sinusoidal function whose instantaneous frequency varies with time. The frequency at time `t` is given by the polynomial `poly`. Parameters ---------- t : ndarray Times at which to evaluate the waveform. poly : 1-D array_like or instance of numpy.poly1d The desired frequency expressed as a polynomial. If `poly` is a list or ndarray of length n, then the elements of `poly` are the coefficients of the polynomial, and the instantaneous frequency is ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` If `poly` is an instance of numpy.poly1d, then the instantaneous frequency is ``f(t) = poly(t)`` phi : float, optional Phase offset, in degrees, Default: 0. Returns ------- sweep_poly : ndarray A numpy array containing the signal evaluated at `t` with the requested time-varying frequency. More precisely, the function returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above. See Also -------- chirp Notes ----- .. versionadded:: 0.8.0 If `poly` is a list or ndarray of length `n`, then the elements of `poly` are the coefficients of the polynomial, and the instantaneous frequency is: ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]`` If `poly` is an instance of `numpy.poly1d`, then the instantaneous frequency is: ``f(t) = poly(t)`` Finally, the output `s` is: ``cos(phase + (pi/180)*phi)`` where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``, ``f(t)`` as defined above. Examples -------- Compute the waveform with instantaneous frequency:: f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 over the interval 0 <= t <= 10. >>> import numpy as np >>> from scipy.signal import sweep_poly >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0]) >>> t = np.linspace(0, 10, 5001) >>> w = sweep_poly(t, p) Plot it: >>> import matplotlib.pyplot as plt >>> plt.subplot(2, 1, 1) >>> plt.plot(t, w) >>> plt.title("Sweep Poly\\nwith frequency " + ... "$f(t) = 0.025t^3 - 0.36t^2 + 1.25t + 2$") >>> plt.subplot(2, 1, 2) >>> plt.plot(t, p(t), 'r', label='f(t)') >>> plt.legend() >>> plt.xlabel('t') >>> plt.tight_layout() >>> plt.show() """ # 'phase' is computed in _sweep_poly_phase, to make testing easier. phase = _sweep_poly_phase(t, poly) # Convert to radians. phi *= pi / 180 return cos(phase + phi) def _sweep_poly_phase(t, poly): """ Calculate the phase used by sweep_poly to generate its output. See `sweep_poly` for a description of the arguments. """ # polyint handles lists, ndarrays and instances of poly1d automatically. intpoly = polyint(poly) phase = 2 * pi * polyval(intpoly, t) return phase def unit_impulse(shape, idx=None, dtype=float): """ Unit impulse signal (discrete delta function) or unit basis vector. Parameters ---------- shape : int or tuple of int Number of samples in the output (1-D), or a tuple that represents the shape of the output (N-D). idx : None or int or tuple of int or 'mid', optional Index at which the value is 1. If None, defaults to the 0th element. If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in all dimensions. If an int, the impulse will be at `idx` in all dimensions. dtype : data-type, optional The desired data-type for the array, e.g., ``numpy.int8``. Default is ``numpy.float64``. Returns ------- y : ndarray Output array containing an impulse signal. Notes ----- The 1D case is also known as the Kronecker delta. .. 
versionadded:: 0.19.0 Examples -------- An impulse at the 0th element (:math:`\\delta[n]`): >>> from scipy import signal >>> signal.unit_impulse(8) array([ 1., 0., 0., 0., 0., 0., 0., 0.]) Impulse offset by 2 samples (:math:`\\delta[n-2]`): >>> signal.unit_impulse(7, 2) array([ 0., 0., 1., 0., 0., 0., 0.]) 2-dimensional impulse, centered: >>> signal.unit_impulse((3, 3), 'mid') array([[ 0., 0., 0.], [ 0., 1., 0.], [ 0., 0., 0.]]) Impulse at (2, 2), using broadcasting: >>> signal.unit_impulse((4, 4), 2) array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 1., 0.], [ 0., 0., 0., 0.]]) Plot the impulse response of a 4th-order Butterworth lowpass filter: >>> imp = signal.unit_impulse(100, 'mid') >>> b, a = signal.butter(4, 0.2) >>> response = signal.lfilter(b, a, imp) >>> import numpy as np >>> import matplotlib.pyplot as plt >>> plt.plot(np.arange(-50, 50), imp) >>> plt.plot(np.arange(-50, 50), response) >>> plt.margins(0.1, 0.1) >>> plt.xlabel('Time [samples]') >>> plt.ylabel('Amplitude') >>> plt.grid(True) >>> plt.show() """ out = zeros(shape, dtype) shape = np.atleast_1d(shape) if idx is None: idx = (0,) * len(shape) elif idx == 'mid': idx = tuple(shape // 2) elif not hasattr(idx, "__iter__"): idx = (idx,) * len(shape) out[idx] = 1 return out
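# ----------------------------------------------------------------------------
# Sanity-check sketch (illustrative only): for a linear sweep, the phase
# computed by _chirp_phase() above should equal the closed form
# 2*pi*(f0*t + 0.5*(f1 - f0)/t1 * t**2) stated in the chirp() notes.  The
# numeric values below are assumptions chosen for demonstration.
if __name__ == '__main__':
    t = np.linspace(0, 10, 1001)
    f0, f1, t1 = 6.0, 1.0, 10.0
    phase = _chirp_phase(t, f0, t1, f1, method='linear')
    expected = 2 * pi * (f0 * t + 0.5 * (f1 - f0) / t1 * t ** 2)
    assert np.allclose(phase, expected)
    print("linear chirp phase matches the closed-form expression")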
20,523
29.496285
79
py
scipy
scipy-main/scipy/signal/ltisys.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _ltisys __all__ = [ # noqa: F822 'lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode', 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', 'dfreqresp', 'dbode', 's_qr', 'integrate', 'interpolate', 'linalg', 'interp1d', 'tf2zpk', 'zpk2tf', 'normalize', 'freqs', 'freqz', 'freqs_zpk', 'freqz_zpk', 'tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete', 'atleast_1d', 'atleast_2d', 'squeeze', 'transpose', 'zeros_like', 'linspace', 'nan_to_num', 'LinearTimeInvariant', 'TransferFunctionContinuous', 'TransferFunctionDiscrete', 'ZerosPolesGainContinuous', 'ZerosPolesGainDiscrete', 'StateSpaceContinuous', 'StateSpaceDiscrete', 'Bunch' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.ltisys is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.ltisys` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_ltisys, name)
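# ----------------------------------------------------------------------------
# Behaviour sketch (illustrative only): accessing any name listed in __all__
# through this shim goes through the module-level __getattr__ above, which
# emits a DeprecationWarning and forwards the lookup to scipy.signal._ltisys.
if __name__ == '__main__':
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from scipy.signal import ltisys
        _ = ltisys.lti      # triggers __getattr__: warning + forwarded class
    print([str(w.message) for w in caught])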
1,470
36.717949
76
py
scipy
scipy-main/scipy/signal/setup.py
from scipy._build_utils import numpy_nodepr_api from scipy._build_utils import tempita import os def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration from scipy._build_utils.compiler_helper import set_c_flags_hook config = Configuration('signal', parent_package, top_path) config.add_data_dir('tests') config.add_subpackage('windows') # convert the *.c.in files : `_lfilter.c.in -> _lfilter.c` etc srcdir = os.path.join(os.getcwd(), 'scipy', 'signal') tempita.process_tempita(os.path.join(srcdir, '_lfilter.c.in')) tempita.process_tempita(os.path.join(srcdir, '_correlate_nd.c.in')) tempita.process_tempita(os.path.join(srcdir, '_bspline_util.c.in')) sigtools = config.add_extension('_sigtools', sources=['_sigtoolsmodule.c', '_firfilter.c', '_medianfilter.c', '_lfilter.c', '_correlate_nd.c'], depends=['_sigtools.h'], include_dirs=['.'], **numpy_nodepr_api) sigtools._pre_build_hook = set_c_flags_hook if int(os.environ.get('SCIPY_USE_PYTHRAN', 1)): import pythran ext = pythran.dist.PythranExtension( 'scipy.signal._max_len_seq_inner', sources=["scipy/signal/_max_len_seq_inner.py"], config=['compiler.blas=none']) config.ext_modules.append(ext) ext = pythran.dist.PythranExtension( 'scipy.signal._spectral', sources=["scipy/signal/_spectral.py"], config=['compiler.blas=none']) config.ext_modules.append(ext) else: config.add_extension( '_spectral', sources=['_spectral.c']) config.add_extension( '_max_len_seq_inner', sources=['_max_len_seq_inner.c']) config.add_extension( '_peak_finding_utils', sources=['_peak_finding_utils.c']) config.add_extension( '_sosfilt', sources=['_sosfilt.c']) config.add_extension( '_upfirdn_apply', sources=['_upfirdn_apply.c']) spline_src = ['_splinemodule.c', '_bspline_util.c'] config.add_extension('_spline', sources=spline_src, **numpy_nodepr_api) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
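# ----------------------------------------------------------------------------
# Note (illustrative only): the Pythran branch above is gated on the
# SCIPY_USE_PYTHRAN environment variable, read via
# int(os.environ.get('SCIPY_USE_PYTHRAN', 1)).  Setting it to '0' before the
# configuration runs falls back to the pre-generated C sources, e.g.:
#
#     import os
#     os.environ['SCIPY_USE_PYTHRAN'] = '0'   # build _spectral.c etc. instead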
2,412
35.560606
75
py
scipy
scipy-main/scipy/signal/lti_conversion.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _lti_conversion __all__ = [ # noqa: F822 'tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk', 'cont2discrete','eye', 'atleast_2d', 'poly', 'prod', 'array', 'outer', 'linalg', 'tf2zpk', 'zpk2tf', 'normalize' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.lti_conversion is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.lti_conversion` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_lti_conversion, name)
936
29.225806
79
py
scipy
scipy-main/scipy/signal/_bsplines.py
import warnings from numpy import (logical_and, asarray, pi, zeros_like, piecewise, array, arctan2, tan, zeros, arange, floor) from numpy import (sqrt, exp, greater, less, cos, add, sin, less_equal, greater_equal) # From splinemodule.c from ._spline import cspline2d, sepfir2d from scipy.special import comb from scipy._lib._util import float_factorial from scipy.interpolate import BSpline __all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic', 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval'] def spline_filter(Iin, lmbda=5.0): """Smoothing spline (cubic) filtering of a rank-2 array. Filter an input data set, `Iin`, using a (cubic) smoothing spline of fall-off `lmbda`. Parameters ---------- Iin : array_like input data set lmbda : float, optional spline smoothing fall-off value, default is `5.0`. Returns ------- res : ndarray filtered input data Examples -------- We can filter a multidimensional signal (e.g., a 2D image) using a cubic B-spline filter: >>> import numpy as np >>> from scipy.signal import spline_filter >>> import matplotlib.pyplot as plt >>> orig_img = np.eye(20) # create an image >>> orig_img[10, :] = 1.0 >>> sp_filter = spline_filter(orig_img, lmbda=0.1) >>> f, ax = plt.subplots(1, 2, sharex=True) >>> for ind, data in enumerate([[orig_img, "original image"], ... [sp_filter, "spline filter"]]): ... ax[ind].imshow(data[0], cmap='gray_r') ... ax[ind].set_title(data[1]) >>> plt.tight_layout() >>> plt.show() """ intype = Iin.dtype.char hcol = array([1.0, 4.0, 1.0], 'f') / 6.0 if intype in ['F', 'D']: Iin = Iin.astype('F') ckr = cspline2d(Iin.real, lmbda) cki = cspline2d(Iin.imag, lmbda) outr = sepfir2d(ckr, hcol, hcol) outi = sepfir2d(cki, hcol, hcol) out = (outr + 1j * outi).astype(intype) elif intype in ['f', 'd']: ckr = cspline2d(Iin, lmbda) out = sepfir2d(ckr, hcol, hcol) out = out.astype(intype) else: raise TypeError("Invalid data type for Iin") return out _splinefunc_cache = {} def _bspline_piecefunctions(order): """Returns the function defined over the left-side pieces for a bspline of a given order. The 0th piece is the first one less than 0. The last piece is a function identical to 0 (returned as the constant 0). (There are order//2 + 2 total pieces). Also returns the condition functions that when evaluated return boolean arrays for use with `numpy.piecewise`. """ try: return _splinefunc_cache[order] except KeyError: pass def condfuncgen(num, val1, val2): if num == 0: return lambda x: logical_and(less_equal(x, val1), greater_equal(x, val2)) elif num == 2: return lambda x: less_equal(x, val2) else: return lambda x: logical_and(less(x, val1), greater_equal(x, val2)) last = order // 2 + 2 if order % 2: startbound = -1.0 else: startbound = -0.5 condfuncs = [condfuncgen(0, 0, startbound)] bound = startbound for num in range(1, last - 1): condfuncs.append(condfuncgen(1, bound, bound - 1)) bound = bound - 1 condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0)) # final value of bound is used in piecefuncgen below # the functions to evaluate are taken from the left-hand side # in the general expression derived from the central difference # operator (because they involve fewer terms). 
fval = float_factorial(order) def piecefuncgen(num): Mk = order // 2 - num if (Mk < 0): return 0 # final function is 0 coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval for k in range(Mk + 1)] shifts = [-bound - k for k in range(Mk + 1)] def thefunc(x): res = 0.0 for k in range(Mk + 1): res += coeffs[k] * (x + shifts[k]) ** order return res return thefunc funclist = [piecefuncgen(k) for k in range(last)] _splinefunc_cache[order] = (funclist, condfuncs) return funclist, condfuncs msg_bspline = """`scipy.signal.bspline` is deprecated in SciPy 1.11 and will be removed in SciPy 1.13. The exact equivalent (for a float array `x`) is >>> from scipy.interpolate import BSpline >>> knots = np.arange(-(n+1)/2, (n+3)/2) >>> out = BSpline.basis_element(knots)(x) >>> out[(x < knots[0]) | (x > knots[-1])] = 0.0 """ def bspline(x, n): """ .. deprecated:: 1.11.0 `scipy.signal.bspline` is deprecated in SciPy 1.11 and will be removed in SciPy 1.13. The exact equivalent (for a float array `x`) is:: >>> from scipy.interpolate import BSpline >>> knots = np.arange(-(n+1)/2, (n+3)/2)) >>> out = BSpline.basis_element(knots)(x) >>> out[(x < knots[0]) | (x > knots[-1])] = 0.0 B-spline basis function of order n. Parameters ---------- x : array_like a knot vector n : int The order of the spline. Must be non-negative, i.e., n >= 0 Returns ------- res : ndarray B-spline basis function values See Also -------- cubic : A cubic B-spline. quadratic : A quadratic B-spline. Notes ----- Uses numpy.piecewise and automatic function-generator. Examples -------- We can calculate B-Spline basis function of several orders: >>> import numpy as np >>> from scipy.signal import bspline, cubic, quadratic >>> bspline(0.0, 1) 1 >>> knots = [-1.0, 0.0, -1.0] >>> bspline(knots, 2) array([0.125, 0.75, 0.125]) >>> np.array_equal(bspline(knots, 2), quadratic(knots)) True >>> np.array_equal(bspline(knots, 3), cubic(knots)) True """ warnings.warn(msg_bspline, DeprecationWarning, stacklevel=2) ax = -abs(asarray(x, dtype=float)) # number of pieces on the left-side is (n+1)/2 funclist, condfuncs = _bspline_piecefunctions(n) condlist = [func(ax) for func in condfuncs] return piecewise(ax, condlist, funclist) def gauss_spline(x, n): r"""Gaussian approximation to B-spline basis function of order n. Parameters ---------- x : array_like a knot vector n : int The order of the spline. Must be non-negative, i.e., n >= 0 Returns ------- res : ndarray B-spline basis function values approximated by a zero-mean Gaussian function. Notes ----- The B-spline basis function can be approximated well by a zero-mean Gaussian function with standard-deviation equal to :math:`\sigma=(n+1)/12` for large `n` : .. math:: \frac{1}{\sqrt {2\pi\sigma^2}}exp(-\frac{x^2}{2\sigma}) References ---------- .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In: Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer Science, vol 4485. Springer, Berlin, Heidelberg .. 
[2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html Examples -------- We can calculate B-Spline basis functions approximated by a gaussian distribution: >>> import numpy as np >>> from scipy.signal import gauss_spline, bspline >>> knots = np.array([-1.0, 0.0, -1.0]) >>> gauss_spline(knots, 3) array([0.15418033, 0.6909883, 0.15418033]) # may vary >>> bspline(knots, 3) array([0.16666667, 0.66666667, 0.16666667]) # may vary """ x = asarray(x) signsq = (n + 1) / 12.0 return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq) msg_cubic = """`scipy.signal.cubic` is deprecated in SciPy 1.11 and will be removed in SciPy 1.13. The exact equivalent (for a float array `x`) is >>> from scipy.interpolate import BSpline >>> out = BSpline.basis_element([-2, -1, 0, 1, 2])(x) >>> out[(x < -2 | (x > 2)] = 0.0 """ def cubic(x): """ .. deprecated:: 1.11.0 `scipy.signal.cubic` is deprecated in SciPy 1.11 and will be removed in SciPy 1.13. The exact equivalent (for a float array `x`) is:: >>> from scipy.interpolate import BSpline >>> out = BSpline.basis_element([-2, -1, 0, 1, 2])(x) >>> out[(x < -2 | (x > 2)] = 0.0 A cubic B-spline. This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``. Parameters ---------- x : array_like a knot vector Returns ------- res : ndarray Cubic B-spline basis function values See Also -------- bspline : B-spline basis function of order n quadratic : A quadratic B-spline. Examples -------- We can calculate B-Spline basis function of several orders: >>> import numpy as np >>> from scipy.signal import bspline, cubic, quadratic >>> bspline(0.0, 1) 1 >>> knots = [-1.0, 0.0, -1.0] >>> bspline(knots, 2) array([0.125, 0.75, 0.125]) >>> np.array_equal(bspline(knots, 2), quadratic(knots)) True >>> np.array_equal(bspline(knots, 3), cubic(knots)) True """ warnings.warn(msg_cubic, DeprecationWarning, stacklevel=2) ax = abs(asarray(x, dtype=float)) res = zeros_like(ax) cond1 = less(ax, 1) if cond1.any(): ax1 = ax[cond1] res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1) cond2 = ~cond1 & less(ax, 2) if cond2.any(): ax2 = ax[cond2] res[cond2] = 1.0 / 6 * (2 - ax2) ** 3 return res def _cubic(x): x = asarray(x, dtype=float) b = BSpline.basis_element([-2, -1, 0, 1, 2], extrapolate=False) out = b(x) out[(x < -2) | (x > 2)] = 0 return out msg_quadratic = """`scipy.signal.quadratic` is deprecated in SciPy 1.11 and will be removed in SciPy 1.13. The exact equivalent (for a float array `x`) is >>> from scipy.interpolate import BSpline >>> out = BSpline.basis_element([-1.5, -0.5, 0.5, 1.5])(x) >>> out[(x < -1.5 | (x > 1.5)] = 0.0 """ def quadratic(x): """ .. deprecated:: 1.11.0 `scipy.signal.quadratic` is deprecated in SciPy 1.11 and will be removed in SciPy 1.13. The exact equivalent (for a float array `x`) is:: >>> from scipy.interpolate import BSpline >>> out = BSpline.basis_element([-1.5, -0.5, 0.5, 1.5])(x) >>> out[(x < -1.5 | (x > 1.5)] = 0.0 A quadratic B-spline. This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``. Parameters ---------- x : array_like a knot vector Returns ------- res : ndarray Quadratic B-spline basis function values See Also -------- bspline : B-spline basis function of order n cubic : A cubic B-spline. 
Examples -------- We can calculate B-Spline basis function of several orders: >>> import numpy as np >>> from scipy.signal import bspline, cubic, quadratic >>> bspline(0.0, 1) 1 >>> knots = [-1.0, 0.0, -1.0] >>> bspline(knots, 2) array([0.125, 0.75, 0.125]) >>> np.array_equal(bspline(knots, 2), quadratic(knots)) True >>> np.array_equal(bspline(knots, 3), cubic(knots)) True """ warnings.warn(msg_quadratic, DeprecationWarning, stacklevel=2) ax = abs(asarray(x, dtype=float)) res = zeros_like(ax) cond1 = less(ax, 0.5) if cond1.any(): ax1 = ax[cond1] res[cond1] = 0.75 - ax1 ** 2 cond2 = ~cond1 & less(ax, 1.5) if cond2.any(): ax2 = ax[cond2] res[cond2] = (ax2 - 1.5) ** 2 / 2.0 return res def _quadratic(x): x = abs(asarray(x, dtype=float)) b = BSpline.basis_element([-1.5, -0.5, 0.5, 1.5], extrapolate=False) out = b(x) out[(x < -1.5) | (x > 1.5)] = 0 return out def _coeff_smooth(lam): xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam) omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi)) rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam) rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi) return rho, omeg def _hc(k, cs, rho, omega): return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) * greater(k, -1)) def _hs(k, cs, rho, omega): c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) / (1 - 2 * rho * rho * cos(2 * omega) + rho ** 4)) gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega) ak = abs(k) return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak)) def _cubic_smooth_coeff(signal, lamb): rho, omega = _coeff_smooth(lamb) cs = 1 - 2 * rho * cos(omega) + rho * rho K = len(signal) yp = zeros((K,), signal.dtype.char) k = arange(K) yp[0] = (_hc(0, cs, rho, omega) * signal[0] + add.reduce(_hc(k + 1, cs, rho, omega) * signal)) yp[1] = (_hc(0, cs, rho, omega) * signal[0] + _hc(1, cs, rho, omega) * signal[1] + add.reduce(_hc(k + 2, cs, rho, omega) * signal)) for n in range(2, K): yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] - rho * rho * yp[n - 2]) y = zeros((K,), signal.dtype.char) y[K - 1] = add.reduce((_hs(k, cs, rho, omega) + _hs(k + 1, cs, rho, omega)) * signal[::-1]) y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) + _hs(k + 2, cs, rho, omega)) * signal[::-1]) for n in range(K - 3, -1, -1): y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] - rho * rho * y[n + 2]) return y def _cubic_coeff(signal): zi = -2 + sqrt(3) K = len(signal) yplus = zeros((K,), signal.dtype.char) powers = zi ** arange(K) yplus[0] = signal[0] + zi * add.reduce(powers * signal) for k in range(1, K): yplus[k] = signal[k] + zi * yplus[k - 1] output = zeros((K,), signal.dtype) output[K - 1] = zi / (zi - 1) * yplus[K - 1] for k in range(K - 2, -1, -1): output[k] = zi * (output[k + 1] - yplus[k]) return output * 6.0 def _quadratic_coeff(signal): zi = -3 + 2 * sqrt(2.0) K = len(signal) yplus = zeros((K,), signal.dtype.char) powers = zi ** arange(K) yplus[0] = signal[0] + zi * add.reduce(powers * signal) for k in range(1, K): yplus[k] = signal[k] + zi * yplus[k - 1] output = zeros((K,), signal.dtype.char) output[K - 1] = zi / (zi - 1) * yplus[K - 1] for k in range(K - 2, -1, -1): output[k] = zi * (output[k + 1] - yplus[k]) return output * 8.0 def cspline1d(signal, lamb=0.0): """ Compute cubic spline coefficients for rank-1 array. Find the cubic spline coefficients for a 1-D signal assuming mirror-symmetric boundary conditions. To obtain the signal back from the spline representation mirror-symmetric-convolve these coefficients with a length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 . 
Parameters ---------- signal : ndarray A rank-1 array representing samples of a signal. lamb : float, optional Smoothing coefficient, default is 0.0. Returns ------- c : ndarray Cubic spline coefficients. See Also -------- cspline1d_eval : Evaluate a cubic spline at the new set of points. Examples -------- We can filter a signal to reduce and smooth out high-frequency noise with a cubic spline: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import cspline1d, cspline1d_eval >>> rng = np.random.default_rng() >>> sig = np.repeat([0., 1., 0.], 100) >>> sig += rng.standard_normal(len(sig))*0.05 # add noise >>> time = np.linspace(0, len(sig)) >>> filtered = cspline1d_eval(cspline1d(sig), time) >>> plt.plot(sig, label="signal") >>> plt.plot(time, filtered, label="filtered") >>> plt.legend() >>> plt.show() """ if lamb != 0.0: return _cubic_smooth_coeff(signal, lamb) else: return _cubic_coeff(signal) def qspline1d(signal, lamb=0.0): """Compute quadratic spline coefficients for rank-1 array. Parameters ---------- signal : ndarray A rank-1 array representing samples of a signal. lamb : float, optional Smoothing coefficient (must be zero for now). Returns ------- c : ndarray Quadratic spline coefficients. See Also -------- qspline1d_eval : Evaluate a quadratic spline at the new set of points. Notes ----- Find the quadratic spline coefficients for a 1-D signal assuming mirror-symmetric boundary conditions. To obtain the signal back from the spline representation mirror-symmetric-convolve these coefficients with a length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 . Examples -------- We can filter a signal to reduce and smooth out high-frequency noise with a quadratic spline: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import qspline1d, qspline1d_eval >>> rng = np.random.default_rng() >>> sig = np.repeat([0., 1., 0.], 100) >>> sig += rng.standard_normal(len(sig))*0.05 # add noise >>> time = np.linspace(0, len(sig)) >>> filtered = qspline1d_eval(qspline1d(sig), time) >>> plt.plot(sig, label="signal") >>> plt.plot(time, filtered, label="filtered") >>> plt.legend() >>> plt.show() """ if lamb != 0.0: raise ValueError("Smoothing quadratic splines not supported yet.") else: return _quadratic_coeff(signal) def cspline1d_eval(cj, newx, dx=1.0, x0=0): """Evaluate a cubic spline at the new set of points. `dx` is the old sample-spacing while `x0` was the old origin. In other-words the old-sample points (knot-points) for which the `cj` represent spline coefficients were at equally-spaced points of: oldx = x0 + j*dx j=0...N-1, with N=len(cj) Edges are handled using mirror-symmetric boundary conditions. Parameters ---------- cj : ndarray cublic spline coefficients newx : ndarray New set of points. dx : float, optional Old sample-spacing, the default value is 1.0. x0 : int, optional Old origin, the default value is 0. Returns ------- res : ndarray Evaluated a cubic spline points. See Also -------- cspline1d : Compute cubic spline coefficients for rank-1 array. 
Examples -------- We can filter a signal to reduce and smooth out high-frequency noise with a cubic spline: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import cspline1d, cspline1d_eval >>> rng = np.random.default_rng() >>> sig = np.repeat([0., 1., 0.], 100) >>> sig += rng.standard_normal(len(sig))*0.05 # add noise >>> time = np.linspace(0, len(sig)) >>> filtered = cspline1d_eval(cspline1d(sig), time) >>> plt.plot(sig, label="signal") >>> plt.plot(time, filtered, label="filtered") >>> plt.legend() >>> plt.show() """ newx = (asarray(newx) - x0) / float(dx) res = zeros_like(newx, dtype=cj.dtype) if res.size == 0: return res N = len(cj) cond1 = newx < 0 cond2 = newx > (N - 1) cond3 = ~(cond1 | cond2) # handle general mirror-symmetry res[cond1] = cspline1d_eval(cj, -newx[cond1]) res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) newx = newx[cond3] if newx.size == 0: return res result = zeros_like(newx, dtype=cj.dtype) jlower = floor(newx - 2).astype(int) + 1 for i in range(4): thisj = jlower + i indj = thisj.clip(0, N - 1) # handle edge cases result += cj[indj] * _cubic(newx - thisj) res[cond3] = result return res def qspline1d_eval(cj, newx, dx=1.0, x0=0): """Evaluate a quadratic spline at the new set of points. Parameters ---------- cj : ndarray Quadratic spline coefficients newx : ndarray New set of points. dx : float, optional Old sample-spacing, the default value is 1.0. x0 : int, optional Old origin, the default value is 0. Returns ------- res : ndarray Evaluated a quadratic spline points. See Also -------- qspline1d : Compute quadratic spline coefficients for rank-1 array. Notes ----- `dx` is the old sample-spacing while `x0` was the old origin. In other-words the old-sample points (knot-points) for which the `cj` represent spline coefficients were at equally-spaced points of:: oldx = x0 + j*dx j=0...N-1, with N=len(cj) Edges are handled using mirror-symmetric boundary conditions. Examples -------- We can filter a signal to reduce and smooth out high-frequency noise with a quadratic spline: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import qspline1d, qspline1d_eval >>> rng = np.random.default_rng() >>> sig = np.repeat([0., 1., 0.], 100) >>> sig += rng.standard_normal(len(sig))*0.05 # add noise >>> time = np.linspace(0, len(sig)) >>> filtered = qspline1d_eval(qspline1d(sig), time) >>> plt.plot(sig, label="signal") >>> plt.plot(time, filtered, label="filtered") >>> plt.legend() >>> plt.show() """ newx = (asarray(newx) - x0) / dx res = zeros_like(newx) if res.size == 0: return res N = len(cj) cond1 = newx < 0 cond2 = newx > (N - 1) cond3 = ~(cond1 | cond2) # handle general mirror-symmetry res[cond1] = qspline1d_eval(cj, -newx[cond1]) res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2]) newx = newx[cond3] if newx.size == 0: return res result = zeros_like(newx) jlower = floor(newx - 1.5).astype(int) + 1 for i in range(3): thisj = jlower + i indj = thisj.clip(0, N - 1) # handle edge cases result += cj[indj] * _quadratic(newx - thisj) res[cond3] = result return res
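# ----------------------------------------------------------------------------
# Round-trip sketch (illustrative only): with zero smoothing, cspline1d()
# computes interpolating spline coefficients, so evaluating them with
# cspline1d_eval() at the original (integer) knot positions should reproduce
# the input signal.  The random test signal below is an assumption chosen for
# demonstration.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.default_rng(1234)
    sig = rng.standard_normal(50)
    knots = arange(len(sig), dtype=float)      # knot grid used by cspline1d
    recon = cspline1d_eval(cspline1d(sig), knots)
    assert np.allclose(recon, sig)
    print("cspline1d/cspline1d_eval round-trip reproduces the signal")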
22,470
27.845956
89
py
scipy
scipy-main/scipy/signal/fir_filter_design.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _fir_filter_design __all__ = [ # noqa: F822 'kaiser_beta', 'kaiser_atten', 'kaiserord', 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase', 'ceil', 'log', 'irfft', 'fft', 'ifft', 'sinc', 'toeplitz', 'hankel', 'solve', 'LinAlgError', 'LinAlgWarning', 'lstsq' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.fir_filter_design is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.fir_filter_design` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_fir_filter_design, name)
1,003
28.529412
82
py
scipy
scipy-main/scipy/signal/_max_len_seq.py
# Author: Eric Larson # 2014 """Tools for MLS generation""" import numpy as np from ._max_len_seq_inner import _max_len_seq_inner __all__ = ['max_len_seq'] # These are definitions of linear shift register taps for use in max_len_seq() _mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1], 9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8], 14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14], 18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21], 23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20], 27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7], 31: [28], 32: [31, 30, 10]} def max_len_seq(nbits, state=None, length=None, taps=None): """ Maximum length sequence (MLS) generator. Parameters ---------- nbits : int Number of bits to use. Length of the resulting sequence will be ``(2**nbits) - 1``. Note that generating long sequences (e.g., greater than ``nbits == 16``) can take a long time. state : array_like, optional If array, must be of length ``nbits``, and will be cast to binary (bool) representation. If None, a seed of ones will be used, producing a repeatable representation. If ``state`` is all zeros, an error is raised as this is invalid. Default: None. length : int, optional Number of samples to compute. If None, the entire length ``(2**nbits) - 1`` is computed. taps : array_like, optional Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence). If None, taps will be automatically selected (for up to ``nbits == 32``). Returns ------- seq : array Resulting MLS sequence of 0's and 1's. state : array The final state of the shift register. Notes ----- The algorithm for MLS generation is generically described in: https://en.wikipedia.org/wiki/Maximum_length_sequence The default values for taps are specifically taken from the first option listed for each value of ``nbits`` in: https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm .. 
versionadded:: 0.15.0 Examples -------- MLS uses binary convention: >>> from scipy.signal import max_len_seq >>> max_len_seq(4)[0] array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8) MLS has a white spectrum (except for DC): >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from numpy.fft import fft, ifft, fftshift, fftfreq >>> seq = max_len_seq(6)[0]*2-1 # +1 and -1 >>> spec = fft(seq) >>> N = len(seq) >>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-') >>> plt.margins(0.1, 0.1) >>> plt.grid(True) >>> plt.show() Circular autocorrelation of MLS is an impulse: >>> acorrcirc = ifft(spec * np.conj(spec)).real >>> plt.figure() >>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-') >>> plt.margins(0.1, 0.1) >>> plt.grid(True) >>> plt.show() Linear autocorrelation of MLS is approximately an impulse: >>> acorr = np.correlate(seq, seq, 'full') >>> plt.figure() >>> plt.plot(np.arange(-N+1, N), acorr, '.-') >>> plt.margins(0.1, 0.1) >>> plt.grid(True) >>> plt.show() """ taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64 if taps is None: if nbits not in _mls_taps: known_taps = np.array(list(_mls_taps.keys())) raise ValueError('nbits must be between %s and %s if taps is None' % (known_taps.min(), known_taps.max())) taps = np.array(_mls_taps[nbits], taps_dtype) else: taps = np.unique(np.array(taps, taps_dtype))[::-1] if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1: raise ValueError('taps must be non-empty with values between ' 'zero and nbits (inclusive)') taps = np.array(taps) # needed for Cython and Pythran n_max = (2**nbits) - 1 if length is None: length = n_max else: length = int(length) if length < 0: raise ValueError('length must be greater than or equal to 0') # We use int8 instead of bool here because NumPy arrays of bools # don't seem to work nicely with Cython if state is None: state = np.ones(nbits, dtype=np.int8, order='c') else: # makes a copy if need be, ensuring it's 0's and 1's state = np.array(state, dtype=bool, order='c').astype(np.int8) if state.ndim != 1 or state.size != nbits: raise ValueError('state must be a 1-D array of size nbits') if np.all(state == 0): raise ValueError('state must not be all zeros') seq = np.empty(length, dtype=np.int8, order='c') state = _max_len_seq_inner(taps, state, nbits, length, seq) return seq, state
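# ----------------------------------------------------------------------------
# Property-check sketch (illustrative only): a maximum length sequence over
# nbits bits contains exactly 2**(nbits - 1) ones (the balance property), and
# the returned shift-register state can be passed back in to continue the
# sequence in chunks.  The chunk sizes below are assumptions for demonstration.
if __name__ == '__main__':
    nbits = 8
    full, _ = max_len_seq(nbits)
    assert full.sum() == 2 ** (nbits - 1)                # balance property
    first, state = max_len_seq(nbits, length=100)
    rest, _ = max_len_seq(nbits, state=state, length=len(full) - 100)
    assert np.array_equal(np.concatenate([first, rest]), full)
    print("MLS balance and chunked-generation checks passed")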
5,062
35.164286
154
py
scipy
scipy-main/scipy/signal/_wavelets.py
import numpy as np from scipy.linalg import eig from scipy.special import comb from scipy.signal import convolve __all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt'] def daub(p): """ The coefficients for the FIR low-pass filter producing Daubechies wavelets. p>=1 gives the order of the zero at f=1/2. There are 2p filter coefficients. Parameters ---------- p : int Order of the zero at f=1/2, can have values from 1 to 34. Returns ------- daub : ndarray Return """ sqrt = np.sqrt if p < 1: raise ValueError("p must be at least 1.") if p == 1: c = 1 / sqrt(2) return np.array([c, c]) elif p == 2: f = sqrt(2) / 8 c = sqrt(3) return f * np.array([1 + c, 3 + c, 3 - c, 1 - c]) elif p == 3: tmp = 12 * sqrt(10) z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6 z1c = np.conj(z1) f = sqrt(2) / 8 d0 = np.real((1 - z1) * (1 - z1c)) a0 = np.real(z1 * z1c) a1 = 2 * np.real(z1) return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1, a0 - 3 * a1 + 3, 3 - a1, 1]) elif p < 35: # construct polynomial and factor it if p < 35: P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1] yj = np.roots(P) else: # try different polynomial --- needs work P = [comb(p - 1 + k, k, exact=1) / 4.0**k for k in range(p)][::-1] yj = np.roots(P) / 4 # for each root, compute two z roots, select the one with |z|>1 # Build up final polynomial c = np.poly1d([1, 1])**p q = np.poly1d([1]) for k in range(p - 1): yval = yj[k] part = 2 * sqrt(yval * (yval - 1)) const = 1 - 2 * yval z1 = const + part if (abs(z1)) < 1: z1 = const - part q = q * [1, -z1] q = c * np.real(q) # Normalize result q = q / np.sum(q) * sqrt(2) return q.c[::-1] else: raise ValueError("Polynomial factorization does not work " "well for p too large.") def qmf(hk): """ Return high-pass qmf filter from low-pass Parameters ---------- hk : array_like Coefficients of high-pass filter. Returns ------- array_like High-pass filter coefficients. """ N = len(hk) - 1 asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)] return hk[::-1] * np.array(asgn) def cascade(hk, J=7): """ Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients. Parameters ---------- hk : array_like Coefficients of low-pass filter. J : int, optional Values will be computed at grid points ``K/2**J``. Default is 7. Returns ------- x : ndarray The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where ``len(hk) = len(gk) = N+1``. phi : ndarray The scaling function ``phi(x)`` at `x`: ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N. psi : ndarray, optional The wavelet function ``psi(x)`` at `x`: ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N. `psi` is only returned if `gk` is not None. Notes ----- The algorithm uses the vector cascade algorithm described by Strang and Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values and slices for quick reuse. Then inserts vectors into final vector at the end. 
""" N = len(hk) - 1 if (J > 30 - np.log2(N + 1)): raise ValueError("Too many levels.") if (J < 1): raise ValueError("Too few levels.") # construct matrices needed nn, kk = np.ogrid[:N, :N] s2 = np.sqrt(2) # append a zero so that take works thk = np.r_[hk, 0] gk = qmf(hk) tgk = np.r_[gk, 0] indx1 = np.clip(2 * nn - kk, -1, N + 1) indx2 = np.clip(2 * nn - kk + 1, -1, N + 1) m = np.empty((2, 2, N, N), 'd') m[0, 0] = np.take(thk, indx1, 0) m[0, 1] = np.take(thk, indx2, 0) m[1, 0] = np.take(tgk, indx1, 0) m[1, 1] = np.take(tgk, indx2, 0) m *= s2 # construct the grid of points x = np.arange(0, N * (1 << J), dtype=float) / (1 << J) phi = 0 * x psi = 0 * x # find phi0, and phi1 lam, v = eig(m[0, 0]) ind = np.argmin(np.absolute(lam - 1)) # a dictionary with a binary representation of the # evaluation points x < 1 -- i.e. position is 0.xxxx v = np.real(v[:, ind]) # need scaling function to integrate to 1 so find # eigenvector normalized to sum(v,axis=0)=1 sm = np.sum(v) if sm < 0: # need scaling function to integrate to 1 v = -v sm = -sm bitdic = {'0': v / sm} bitdic['1'] = np.dot(m[0, 1], bitdic['0']) step = 1 << J phi[::step] = bitdic['0'] phi[(1 << (J - 1))::step] = bitdic['1'] psi[::step] = np.dot(m[1, 0], bitdic['0']) psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0']) # descend down the levels inserting more and more values # into bitdic -- store the values in the correct location once we # have computed them -- stored in the dictionary # for quicker use later. prevkeys = ['1'] for level in range(2, J + 1): newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys] fac = 1 << (J - level) for key in newkeys: # convert key to number num = 0 for pos in range(level): if key[pos] == '1': num += (1 << (level - 1 - pos)) pastphi = bitdic[key[1:]] ii = int(key[0]) temp = np.dot(m[0, ii], pastphi) bitdic[key] = temp phi[num * fac::step] = temp psi[num * fac::step] = np.dot(m[1, ii], pastphi) prevkeys = newkeys return x, phi, psi def morlet(M, w=5.0, s=1.0, complete=True): """ Complex Morlet wavelet. Parameters ---------- M : int Length of the wavelet. w : float, optional Omega0. Default is 5 s : float, optional Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. complete : bool, optional Whether to use the complete or the standard version. Returns ------- morlet : (M,) ndarray See Also -------- morlet2 : Implementation of Morlet wavelet, compatible with `cwt`. scipy.signal.gausspulse Notes ----- The standard version:: pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) This commonly used wavelet is often referred to simply as the Morlet wavelet. Note that this simplified version can cause admissibility problems at low values of `w`. The complete version:: pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) This version has a correction term to improve admissibility. For `w` greater than 5, the correction term is negligible. Note that the energy of the return wavelet is not normalised according to `s`. The fundamental frequency of this wavelet in Hz is given by ``f = 2*s*w*r / M`` where `r` is the sampling rate. Note: This function was created before `cwt` and is not compatible with it. 
Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> M = 100 >>> s = 4.0 >>> w = 2.0 >>> wavelet = signal.morlet(M, s, w) >>> plt.plot(wavelet.real, label="real") >>> plt.plot(wavelet.imag, label="imag") >>> plt.legend() >>> plt.show() """ x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M) output = np.exp(1j * w * x) if complete: output -= np.exp(-0.5 * (w**2)) output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25) return output def ricker(points, a): """ Return a Ricker wavelet, also known as the "Mexican hat wavelet". It models the function: ``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``, where ``A = 2/(sqrt(3*a)*(pi**0.25))``. Parameters ---------- points : int Number of points in `vector`. Will be centered around 0. a : scalar Width parameter of the wavelet. Returns ------- vector : (N,) ndarray Array of length `points` in shape of ricker curve. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> points = 100 >>> a = 4.0 >>> vec2 = signal.ricker(points, a) >>> print(len(vec2)) 100 >>> plt.plot(vec2) >>> plt.show() """ A = 2 / (np.sqrt(3 * a) * (np.pi**0.25)) wsq = a**2 vec = np.arange(0, points) - (points - 1.0) / 2 xsq = vec**2 mod = (1 - xsq / wsq) gauss = np.exp(-xsq / (2 * wsq)) total = A * mod * gauss return total def morlet2(M, s, w=5): """ Complex Morlet wavelet, designed to work with `cwt`. Returns the complete version of morlet wavelet, normalised according to `s`:: exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s) Parameters ---------- M : int Length of the wavelet. s : float Width parameter of the wavelet. w : float, optional Omega0. Default is 5 Returns ------- morlet : (M,) ndarray See Also -------- morlet : Implementation of Morlet wavelet, incompatible with `cwt` Notes ----- .. versionadded:: 1.4.0 This function was designed to work with `cwt`. Because `morlet2` returns an array of complex numbers, the `dtype` argument of `cwt` should be set to `complex128` for best results. Note the difference in implementation with `morlet`. The fundamental frequency of this wavelet in Hz is given by:: f = w*fs / (2*s*np.pi) where ``fs`` is the sampling rate and `s` is the wavelet width parameter. Similarly we can get the wavelet width parameter at ``f``:: s = w*fs / (2*f*np.pi) Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> M = 100 >>> s = 4.0 >>> w = 2.0 >>> wavelet = signal.morlet2(M, s, w) >>> plt.plot(abs(wavelet)) >>> plt.show() This example shows basic use of `morlet2` with `cwt` in time-frequency analysis: >>> t, dt = np.linspace(0, 1, 200, retstep=True) >>> fs = 1/dt >>> w = 6. >>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t) >>> freq = np.linspace(1, fs/2, 100) >>> widths = w*fs / (2*freq*np.pi) >>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w) >>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud') >>> plt.show() """ x = np.arange(0, M) - (M - 1.0) / 2 x = x / s wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25) output = np.sqrt(1/s) * wavelet return output def cwt(data, wavelet, widths, dtype=None, **kwargs): """ Continuous wavelet transform. Performs a continuous wavelet transform on `data`, using the `wavelet` function. A CWT performs a convolution with `data` using the `wavelet` function, which is characterized by a width parameter and length parameter. The `wavelet` function is allowed to be complex. Parameters ---------- data : (N,) ndarray data on which to perform the transform. 
wavelet : function Wavelet function, which should take 2 arguments. The first argument is the number of points that the returned vector will have (len(wavelet(length,width)) == length). The second is a width parameter, defining the size of the wavelet (e.g. standard deviation of a gaussian). See `ricker`, which satisfies these requirements. widths : (M,) sequence Widths to use for transform. dtype : data-type, optional The desired data type of output. Defaults to ``float64`` if the output of `wavelet` is real and ``complex128`` if it is complex. .. versionadded:: 1.4.0 kwargs Keyword arguments passed to wavelet function. .. versionadded:: 1.4.0 Returns ------- cwt: (M, N) ndarray Will have shape of (len(widths), len(data)). Notes ----- .. versionadded:: 1.4.0 For non-symmetric, complex-valued wavelets, the input signal is convolved with the time-reversed complex-conjugate of the wavelet data [1]. :: length = min(10 * width[ii], len(data)) cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii], **kwargs))[::-1], mode='same') References ---------- .. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)", Academic Press, 2009. Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 200, endpoint=False) >>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) >>> widths = np.arange(1, 31) >>> cwtmatr = signal.cwt(sig, signal.ricker, widths) .. note:: For cwt matrix plotting it is advisable to flip the y-axis >>> cwtmatr_yflip = np.flipud(cwtmatr) >>> plt.imshow(cwtmatr_yflip, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto', ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) >>> plt.show() """ # Determine output type if dtype is None: if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG': dtype = np.complex128 else: dtype = np.float64 output = np.empty((len(widths), len(data)), dtype=dtype) for ind, width in enumerate(widths): N = np.min([10 * width, len(data)]) wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1]) output[ind] = convolve(data, wavelet_data, mode='same') return output
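Illustrative note (not part of the file above): the `daub`/`qmf` pair defined earlier in this file forms an orthonormal low-pass/high-pass filter bank, so a few standard identities make a compact sanity check. A minimal sketch assuming SciPy is installed; the choice p = 4 is arbitrary.

import numpy as np
from scipy.signal import daub, qmf

h = daub(4)   # 8-tap Daubechies low-pass filter (p = 4)
g = qmf(h)    # matching high-pass (quadrature mirror) filter

print(np.allclose(np.sum(h), np.sqrt(2)))       # low-pass has gain sqrt(2) at DC
print(np.allclose(np.dot(h, h), 1.0))           # taps have unit energy (orthonormal)
print(np.allclose(np.sum(g), 0.0, atol=1e-8))   # high-pass rejects DC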
14,132
27.551515
84
py
scipy
scipy-main/scipy/signal/filter_design.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _filter_design __all__ = [ # noqa: F822 'findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', 'sosfreqz', 'iirnotch', 'iirpeak', 'bilinear_zpk', 'lp2lp_zpk', 'lp2hp_zpk', 'lp2bp_zpk', 'lp2bs_zpk', 'gammatone', 'iircomb', 'atleast_1d', 'poly', 'polyval', 'roots', 'resize', 'absolute', 'logspace', 'tan', 'log10', 'arctan', 'arcsinh', 'exp', 'arccosh', 'ceil', 'conjugate', 'append', 'prod', 'full', 'array', 'mintypecode', 'npp_polyval', 'polyvalfromroots', 'optimize', 'sp_fft', 'comb', 'float_factorial', 'abs', 'maxflat', 'yulewalk', 'EPSILON', 'filter_dict', 'band_dict', 'bessel_norms' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.filter_design is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.filter_design` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_filter_design, name)
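Illustrative note (not part of the shim above): the module-level `__getattr__` means a lookup through the deprecated `scipy.signal.filter_design` namespace still resolves to the real function, but emits a `DeprecationWarning` pointing at `scipy.signal`. A small hedged sketch of how that behaves:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from scipy.signal.filter_design import butter  # resolved via __getattr__ above
    b, a = butter(4, 0.2)                          # still a working filter design

print(any(issubclass(w.category, DeprecationWarning) for w in caught))  # True

from scipy.signal import butter  # the preferred, warning-free import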
1,719
39
78
py
scipy
scipy-main/scipy/signal/wavelets.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _wavelets __all__ = [ # noqa: F822 'daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt', 'eig', 'comb', 'convolve' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.wavelets is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.wavelets` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_wavelets, name)
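Illustrative note (not part of the shim above): both the deprecated `scipy.signal.wavelets` namespace and `scipy.signal` itself resolve these names out of `scipy.signal._wavelets`, so the two lookups should return the very same function object. A small sketch with the deprecation warning silenced:

import warnings
import scipy.signal as signal

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    from scipy.signal import wavelets as old_namespace
    same_object = old_namespace.ricker is signal.ricker

print(same_object)  # expected: True, both names point at _wavelets.ricker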
828
27.586207
76
py
scipy
scipy-main/scipy/signal/_ltisys.py
""" ltisys -- a collection of classes and functions for modeling linear time invariant systems. """ # # Author: Travis Oliphant 2001 # # Feb 2010: Warren Weckesser # Rewrote lsim2 and added impulse2. # Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com> # Added dlsim, dstep, dimpulse, cont2discrete # Aug 2013: Juan Luis Cano # Rewrote abcd_normalize. # Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr # Added pole placement # Mar 2015: Clancy Rowley # Rewrote lsim # May 2015: Felix Berkenkamp # Split lti class into subclasses # Merged discrete systems and added dlti import warnings # np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7 # use scipy's qr until this is solved from scipy.linalg import qr as s_qr from scipy import integrate, interpolate, linalg from scipy.interpolate import make_interp_spline from ._filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk, freqz_zpk) from ._lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk, cont2discrete, _atleast_2d_or_none) import numpy import numpy as np from numpy.testing import suppress_warnings from numpy import (real, atleast_1d, squeeze, asarray, zeros, dot, transpose, ones, zeros_like, linspace, nan_to_num) import copy __all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode', 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', 'dfreqresp', 'dbode'] class LinearTimeInvariant: def __new__(cls, *system, **kwargs): """Create a new object, don't allow direct instances.""" if cls is LinearTimeInvariant: raise NotImplementedError('The LinearTimeInvariant class is not ' 'meant to be used directly, use `lti` ' 'or `dlti` instead.') return super().__new__(cls) def __init__(self): """ Initialize the `lti` baseclass. The heavy lifting is done by the subclasses. """ super().__init__() self.inputs = None self.outputs = None self._dt = None @property def dt(self): """Return the sampling time of the system, `None` for `lti` systems.""" return self._dt @property def _dt_dict(self): if self.dt is None: return {} else: return {'dt': self.dt} @property def zeros(self): """Zeros of the system.""" return self.to_zpk().zeros @property def poles(self): """Poles of the system.""" return self.to_zpk().poles def _as_ss(self): """Convert to `StateSpace` system, without copying. Returns ------- sys: StateSpace The `StateSpace` system. If the class is already an instance of `StateSpace` then this instance is returned. """ if isinstance(self, StateSpace): return self else: return self.to_ss() def _as_zpk(self): """Convert to `ZerosPolesGain` system, without copying. Returns ------- sys: ZerosPolesGain The `ZerosPolesGain` system. If the class is already an instance of `ZerosPolesGain` then this instance is returned. """ if isinstance(self, ZerosPolesGain): return self else: return self.to_zpk() def _as_tf(self): """Convert to `TransferFunction` system, without copying. Returns ------- sys: ZerosPolesGain The `TransferFunction` system. If the class is already an instance of `TransferFunction` then this instance is returned. """ if isinstance(self, TransferFunction): return self else: return self.to_tf() class lti(LinearTimeInvariant): r""" Continuous-time linear time invariant system base class. Parameters ---------- *system : arguments The `lti` class can be instantiated with either 2, 3 or 4 arguments. 
The following gives the number of arguments and the corresponding continuous-time subclass that is created: * 2: `TransferFunction`: (numerator, denominator) * 3: `ZerosPolesGain`: (zeros, poles, gain) * 4: `StateSpace`: (A, B, C, D) Each argument can be an array or a sequence. See Also -------- ZerosPolesGain, StateSpace, TransferFunction, dlti Notes ----- `lti` instances do not exist directly. Instead, `lti` creates an instance of one of its subclasses: `StateSpace`, `TransferFunction` or `ZerosPolesGain`. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Changing the value of properties that are not directly part of the current system representation (such as the `zeros` of a `StateSpace` system) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> signal.lti(1, 2, 3, 4) StateSpaceContinuous( array([[1]]), array([[2]]), array([[3]]), array([[4]]), dt: None ) Construct the transfer function :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: >>> signal.lti([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`: >>> signal.lti([3, 4], [1, 2]) TransferFunctionContinuous( array([3., 4.]), array([1., 2.]), dt: None ) """ def __new__(cls, *system): """Create an instance of the appropriate subclass.""" if cls is lti: N = len(system) if N == 2: return TransferFunctionContinuous.__new__( TransferFunctionContinuous, *system) elif N == 3: return ZerosPolesGainContinuous.__new__( ZerosPolesGainContinuous, *system) elif N == 4: return StateSpaceContinuous.__new__(StateSpaceContinuous, *system) else: raise ValueError("`system` needs to be an instance of `lti` " "or have 2, 3 or 4 arguments.") # __new__ was called from a subclass, let it call its own functions return super().__new__(cls) def __init__(self, *system): """ Initialize the `lti` baseclass. The heavy lifting is done by the subclasses. """ super().__init__(*system) def impulse(self, X0=None, T=None, N=None): """ Return the impulse response of a continuous-time system. See `impulse` for details. """ return impulse(self, X0=X0, T=T, N=N) def step(self, X0=None, T=None, N=None): """ Return the step response of a continuous-time system. See `step` for details. """ return step(self, X0=X0, T=T, N=N) def output(self, U, T, X0=None): """ Return the response of a continuous-time system to input `U`. See `lsim` for details. """ return lsim(self, U, T, X0=X0) def bode(self, w=None, n=100): """ Calculate Bode magnitude and phase data of a continuous-time system. Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude [dB] and phase [deg]. See `bode` for details. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> sys = signal.TransferFunction([1], [1, 1]) >>> w, mag, phase = sys.bode() >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ return bode(self, w=w, n=n) def freqresp(self, w=None, n=10000): """ Calculate the frequency response of a continuous-time system. 
Returns a 2-tuple containing arrays of frequencies [rad/s] and complex magnitude. See `freqresp` for details. """ return freqresp(self, w=w, n=n) def to_discrete(self, dt, method='zoh', alpha=None): """Return a discretized version of the current system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` """ raise NotImplementedError('to_discrete is not implemented for this ' 'system class.') class dlti(LinearTimeInvariant): r""" Discrete-time linear time invariant system base class. Parameters ---------- *system: arguments The `dlti` class can be instantiated with either 2, 3 or 4 arguments. The following gives the number of arguments and the corresponding discrete-time subclass that is created: * 2: `TransferFunction`: (numerator, denominator) * 3: `ZerosPolesGain`: (zeros, poles, gain) * 4: `StateSpace`: (A, B, C, D) Each argument can be an array or a sequence. dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to ``True`` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- ZerosPolesGain, StateSpace, TransferFunction, lti Notes ----- `dlti` instances do not exist directly. Instead, `dlti` creates an instance of one of its subclasses: `StateSpace`, `TransferFunction` or `ZerosPolesGain`. Changing the value of properties that are not directly part of the current system representation (such as the `zeros` of a `StateSpace` system) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.18.0 Examples -------- >>> from scipy import signal >>> signal.dlti(1, 2, 3, 4) StateSpaceDiscrete( array([[1]]), array([[2]]), array([[3]]), array([[4]]), dt: True ) >>> signal.dlti(1, 2, 3, 4, dt=0.1) StateSpaceDiscrete( array([[1]]), array([[2]]), array([[3]]), array([[4]]), dt: 0.1 ) Construct the transfer function :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time of 0.1 seconds: >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 ) Construct the transfer function :math:`H(z) = \frac{3z + 4}{1z + 2}` with a sampling time of 0.1 seconds: >>> signal.dlti([3, 4], [1, 2], dt=0.1) TransferFunctionDiscrete( array([3., 4.]), array([1., 2.]), dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Create an instance of the appropriate subclass.""" if cls is dlti: N = len(system) if N == 2: return TransferFunctionDiscrete.__new__( TransferFunctionDiscrete, *system, **kwargs) elif N == 3: return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, *system, **kwargs) elif N == 4: return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs) else: raise ValueError("`system` needs to be an instance of `dlti` " "or have 2, 3 or 4 arguments.") # __new__ was called from a subclass, let it call its own functions return super().__new__(cls) def __init__(self, *system, **kwargs): """ Initialize the `lti` baseclass. The heavy lifting is done by the subclasses. 
""" dt = kwargs.pop('dt', True) super().__init__(*system, **kwargs) self.dt = dt @property def dt(self): """Return the sampling time of the system.""" return self._dt @dt.setter def dt(self, dt): self._dt = dt def impulse(self, x0=None, t=None, n=None): """ Return the impulse response of the discrete-time `dlti` system. See `dimpulse` for details. """ return dimpulse(self, x0=x0, t=t, n=n) def step(self, x0=None, t=None, n=None): """ Return the step response of the discrete-time `dlti` system. See `dstep` for details. """ return dstep(self, x0=x0, t=t, n=n) def output(self, u, t, x0=None): """ Return the response of the discrete-time system to input `u`. See `dlsim` for details. """ return dlsim(self, u, t, x0=x0) def bode(self, w=None, n=100): r""" Calculate Bode magnitude and phase data of a discrete-time system. Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude [dB] and phase [deg]. See `dbode` for details. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with sampling time 0.5s: >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5) Equivalent: signal.dbode(sys) >>> w, mag, phase = sys.bode() >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ return dbode(self, w=w, n=n) def freqresp(self, w=None, n=10000, whole=False): """ Calculate the frequency response of a discrete-time system. Returns a 2-tuple containing arrays of frequencies [rad/s] and complex magnitude. See `dfreqresp` for details. """ return dfreqresp(self, w=w, n=n, whole=whole) class TransferFunction(LinearTimeInvariant): r"""Linear Time Invariant system class in transfer function form. Represents the system as the continuous-time transfer function :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the discrete-time transfer function :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where :math:`b` are elements of the numerator `num`, :math:`a` are elements of the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. `TransferFunction` systems inherit additional functionality from the `lti`, respectively the `dlti` classes, depending on which system representation is used. Parameters ---------- *system: arguments The `TransferFunction` class can be instantiated with 1 or 2 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 2: array_like: (numerator, denominator) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `None` (continuous-time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- ZerosPolesGain, StateSpace, lti, dlti tf2ss, tf2zpk, tf2sos Notes ----- Changing the value of properties that are not part of the `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. 
``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``) Examples -------- Construct the transfer function :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den) TransferFunctionContinuous( array([1., 3., 3.]), array([1., 2., 1.]), dt: None ) Construct the transfer function :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of 0.1 seconds: >>> signal.TransferFunction(num, den, dt=0.1) TransferFunctionDiscrete( array([1., 3., 3.]), array([1., 2., 1.]), dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Handle object conversion if input is an instance of lti.""" if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_tf() # Choose whether to inherit from `lti` or from `dlti` if cls is TransferFunction: if kwargs.get('dt') is None: return TransferFunctionContinuous.__new__( TransferFunctionContinuous, *system, **kwargs) else: return TransferFunctionDiscrete.__new__( TransferFunctionDiscrete, *system, **kwargs) # No special conversion needed return super().__new__(cls) def __init__(self, *system, **kwargs): """Initialize the state space LTI system.""" # Conversion of lti instances is handled in __new__ if isinstance(system[0], LinearTimeInvariant): return # Remove system arguments, not needed by parents anymore super().__init__(**kwargs) self._num = None self._den = None self.num, self.den = normalize(*system) def __repr__(self): """Return representation of the system's transfer function""" return '{}(\n{},\n{},\ndt: {}\n)'.format( self.__class__.__name__, repr(self.num), repr(self.den), repr(self.dt), ) @property def num(self): """Numerator of the `TransferFunction` system.""" return self._num @num.setter def num(self, num): self._num = atleast_1d(num) # Update dimensions if len(self.num.shape) > 1: self.outputs, self.inputs = self.num.shape else: self.outputs = 1 self.inputs = 1 @property def den(self): """Denominator of the `TransferFunction` system.""" return self._den @den.setter def den(self, den): self._den = atleast_1d(den) def _copy(self, system): """ Copy the parameters of another `TransferFunction` object Parameters ---------- system : `TransferFunction` The `StateSpace` system that is to be copied """ self.num = system.num self.den = system.den def to_tf(self): """ Return a copy of the current `TransferFunction` system. Returns ------- sys : instance of `TransferFunction` The current system (copy) """ return copy.deepcopy(self) def to_zpk(self): """ Convert system representation to `ZerosPolesGain`. Returns ------- sys : instance of `ZerosPolesGain` Zeros, poles, gain representation of the current system """ return ZerosPolesGain(*tf2zpk(self.num, self.den), **self._dt_dict) def to_ss(self): """ Convert system representation to `StateSpace`. Returns ------- sys : instance of `StateSpace` State space model of the current system """ return StateSpace(*tf2ss(self.num, self.den), **self._dt_dict) @staticmethod def _z_to_zinv(num, den): """Change a transfer function from the variable `z` to `z**-1`. Parameters ---------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree of 'z'. That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. Returns ------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of ascending degree of 'z**-1'. 
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. """ diff = len(num) - len(den) if diff > 0: den = np.hstack((np.zeros(diff), den)) elif diff < 0: num = np.hstack((np.zeros(-diff), num)) return num, den @staticmethod def _zinv_to_z(num, den): """Change a transfer function from the variable `z` to `z**-1`. Parameters ---------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of ascending degree of 'z**-1'. That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. Returns ------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree of 'z'. That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. """ diff = len(num) - len(den) if diff > 0: den = np.hstack((den, np.zeros(diff))) elif diff < 0: num = np.hstack((num, np.zeros(-diff))) return num, den class TransferFunctionContinuous(TransferFunction, lti): r""" Continuous-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where :math:`b` are elements of the numerator `num`, :math:`a` are elements of the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. Continuous-time `TransferFunction` systems inherit additional functionality from the `lti` class. Parameters ---------- *system: arguments The `TransferFunction` class can be instantiated with 1 or 2 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 2: array_like: (numerator, denominator) See Also -------- ZerosPolesGain, StateSpace, lti tf2ss, tf2zpk, tf2sos Notes ----- Changing the value of properties that are not part of the `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``) Examples -------- Construct the transfer function :math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den) TransferFunctionContinuous( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: None ) """ def to_discrete(self, dt, method='zoh', alpha=None): """ Returns the discretized `TransferFunction` system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` and `StateSpace` """ return TransferFunction(*cont2discrete((self.num, self.den), dt, method=method, alpha=alpha)[:-1], dt=dt) class TransferFunctionDiscrete(TransferFunction, dlti): r""" Discrete-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where :math:`b` are elements of the numerator `num`, :math:`a` are elements of the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. Discrete-time `TransferFunction` systems inherit additional functionality from the `dlti` class. 
Parameters ---------- *system: arguments The `TransferFunction` class can be instantiated with 1 or 2 arguments. The following gives the number of input arguments and their interpretation: * 1: `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 2: array_like: (numerator, denominator) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `True` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- ZerosPolesGain, StateSpace, dlti tf2ss, tf2zpk, tf2sos Notes ----- Changing the value of properties that are not part of the `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). Examples -------- Construct the transfer function :math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of 0.5 seconds: >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den, dt=0.5) TransferFunctionDiscrete( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: 0.5 ) """ pass class ZerosPolesGain(LinearTimeInvariant): r""" Linear Time Invariant system class in zeros, poles, gain form. Represents the system as the continuous- or discrete-time transfer function :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. `ZerosPolesGain` systems inherit additional functionality from the `lti`, respectively the `dlti` classes, depending on which system representation is used. Parameters ---------- *system : arguments The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 3: array_like: (zeros, poles, gain) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `None` (continuous-time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, StateSpace, lti, dlti zpk2ss, zpk2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. 
Examples -------- Construct the transfer function :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: >>> from scipy import signal >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) Construct the transfer function :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time of 0.1 seconds: >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Handle object conversion if input is an instance of `lti`""" if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_zpk() # Choose whether to inherit from `lti` or from `dlti` if cls is ZerosPolesGain: if kwargs.get('dt') is None: return ZerosPolesGainContinuous.__new__( ZerosPolesGainContinuous, *system, **kwargs) else: return ZerosPolesGainDiscrete.__new__( ZerosPolesGainDiscrete, *system, **kwargs ) # No special conversion needed return super().__new__(cls) def __init__(self, *system, **kwargs): """Initialize the zeros, poles, gain system.""" # Conversion of lti instances is handled in __new__ if isinstance(system[0], LinearTimeInvariant): return super().__init__(**kwargs) self._zeros = None self._poles = None self._gain = None self.zeros, self.poles, self.gain = system def __repr__(self): """Return representation of the `ZerosPolesGain` system.""" return '{}(\n{},\n{},\n{},\ndt: {}\n)'.format( self.__class__.__name__, repr(self.zeros), repr(self.poles), repr(self.gain), repr(self.dt), ) @property def zeros(self): """Zeros of the `ZerosPolesGain` system.""" return self._zeros @zeros.setter def zeros(self, zeros): self._zeros = atleast_1d(zeros) # Update dimensions if len(self.zeros.shape) > 1: self.outputs, self.inputs = self.zeros.shape else: self.outputs = 1 self.inputs = 1 @property def poles(self): """Poles of the `ZerosPolesGain` system.""" return self._poles @poles.setter def poles(self, poles): self._poles = atleast_1d(poles) @property def gain(self): """Gain of the `ZerosPolesGain` system.""" return self._gain @gain.setter def gain(self, gain): self._gain = gain def _copy(self, system): """ Copy the parameters of another `ZerosPolesGain` system. Parameters ---------- system : instance of `ZerosPolesGain` The zeros, poles gain system that is to be copied """ self.poles = system.poles self.zeros = system.zeros self.gain = system.gain def to_tf(self): """ Convert system representation to `TransferFunction`. Returns ------- sys : instance of `TransferFunction` Transfer function of the current system """ return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), **self._dt_dict) def to_zpk(self): """ Return a copy of the current 'ZerosPolesGain' system. Returns ------- sys : instance of `ZerosPolesGain` The current system (copy) """ return copy.deepcopy(self) def to_ss(self): """ Convert system representation to `StateSpace`. Returns ------- sys : instance of `StateSpace` State space model of the current system """ return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), **self._dt_dict) class ZerosPolesGainContinuous(ZerosPolesGain, lti): r""" Continuous-time Linear Time Invariant system in zeros, poles, gain form. Represents the system as the continuous time transfer function :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. 
Continuous-time `ZerosPolesGain` systems inherit additional functionality from the `lti` class. Parameters ---------- *system : arguments The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 3: array_like: (zeros, poles, gain) See Also -------- TransferFunction, StateSpace, lti zpk2ss, zpk2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. Examples -------- Construct the transfer function :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: >>> from scipy import signal >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) """ def to_discrete(self, dt, method='zoh', alpha=None): """ Returns the discretized `ZerosPolesGain` system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` and `ZerosPolesGain` """ return ZerosPolesGain( *cont2discrete((self.zeros, self.poles, self.gain), dt, method=method, alpha=alpha)[:-1], dt=dt) class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): r""" Discrete-time Linear Time Invariant system in zeros, poles, gain form. Represents the system as the discrete-time transfer function :math:`H(z)=k \prod_i (z - q[i]) / \prod_j (z - p[j])`, where :math:`k` is the `gain`, :math:`q` are the `zeros` and :math:`p` are the `poles`. Discrete-time `ZerosPolesGain` systems inherit additional functionality from the `dlti` class. Parameters ---------- *system : arguments The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 3: array_like: (zeros, poles, gain) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `True` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, StateSpace, dlti zpk2ss, zpk2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. Examples -------- Construct the transfer function :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`: >>> from scipy import signal >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) Construct the transfer function :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time of 0.1 seconds: >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 ) """ pass class StateSpace(LinearTimeInvariant): r""" Linear Time Invariant system in state-space form. 
Represents the system as the continuous-time, first order differential equation :math:`\dot{x} = A x + B u` or the discrete-time difference equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems inherit additional functionality from the `lti`, respectively the `dlti` classes, depending on which system representation is used. Parameters ---------- *system: arguments The `StateSpace` class can be instantiated with 1 or 4 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 4: array_like: (A, B, C, D) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `None` (continuous-time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, ZerosPolesGain, lti, dlti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `StateSpace` system representation (such as `zeros` or `poles`) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> import numpy as np >>> a = np.array([[0, 1], [0, 0]]) >>> b = np.array([[0], [1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> sys = signal.StateSpace(a, b, c, d) >>> print(sys) StateSpaceContinuous( array([[0, 1], [0, 0]]), array([[0], [1]]), array([[1, 0]]), array([[0]]), dt: None ) >>> sys.to_discrete(0.1) StateSpaceDiscrete( array([[1. , 0.1], [0. , 1. ]]), array([[0.005], [0.1 ]]), array([[1, 0]]), array([[0]]), dt: 0.1 ) >>> a = np.array([[1, 0.1], [0, 1]]) >>> b = np.array([[0.005], [0.1]]) >>> signal.StateSpace(a, b, c, d, dt=0.1) StateSpaceDiscrete( array([[1. , 0.1], [0. , 1. ]]), array([[0.005], [0.1 ]]), array([[1, 0]]), array([[0]]), dt: 0.1 ) """ # Override NumPy binary operations and ufuncs __array_priority__ = 100.0 __array_ufunc__ = None def __new__(cls, *system, **kwargs): """Create new StateSpace object and settle inheritance.""" # Handle object conversion if input is an instance of `lti` if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_ss() # Choose whether to inherit from `lti` or from `dlti` if cls is StateSpace: if kwargs.get('dt') is None: return StateSpaceContinuous.__new__(StateSpaceContinuous, *system, **kwargs) else: return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs) # No special conversion needed return super().__new__(cls) def __init__(self, *system, **kwargs): """Initialize the state space lti/dlti system.""" # Conversion of lti instances is handled in __new__ if isinstance(system[0], LinearTimeInvariant): return # Remove system arguments, not needed by parents anymore super().__init__(**kwargs) self._A = None self._B = None self._C = None self._D = None self.A, self.B, self.C, self.D = abcd_normalize(*system) def __repr__(self): """Return representation of the `StateSpace` system.""" return '{}(\n{},\n{},\n{},\n{},\ndt: {}\n)'.format( self.__class__.__name__, repr(self.A), repr(self.B), repr(self.C), repr(self.D), repr(self.dt), ) def _check_binop_other(self, other): return isinstance(other, (StateSpace, np.ndarray, float, complex, np.number, int)) def __mul__(self, other): """ Post-multiply another system or a scalar Handles multiplication of systems in the sense of a frequency domain multiplication. 
That means, given two systems E1(s) and E2(s), their multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s) is equivalent to first applying E2(s), and then E1(s). Notes ----- For SISO systems the order of system application does not matter. However, for MIMO systems, where the two systems are matrices, the order above ensures standard Matrix multiplication rules apply. """ if not self._check_binop_other(other): return NotImplemented if isinstance(other, StateSpace): # Disallow mix of discrete and continuous systems. if type(other) is not type(self): return NotImplemented if self.dt != other.dt: raise TypeError('Cannot multiply systems with different `dt`.') n1 = self.A.shape[0] n2 = other.A.shape[0] # Interconnection of systems # x1' = A1 x1 + B1 u1 # y1 = C1 x1 + D1 u1 # x2' = A2 x2 + B2 y1 # y2 = C2 x2 + D2 y1 # # Plugging in with u1 = y2 yields # [x1'] [A1 B1*C2 ] [x1] [B1*D2] # [x2'] = [0 A2 ] [x2] + [B2 ] u2 # [x1] # y2 = [C1 D1*C2] [x2] + D1*D2 u2 a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))), np.hstack((zeros((n2, n1)), other.A)))) b = np.vstack((np.dot(self.B, other.D), other.B)) c = np.hstack((self.C, np.dot(self.D, other.C))) d = np.dot(self.D, other.D) else: # Assume that other is a scalar / matrix # For post multiplication the input gets scaled a = self.A b = np.dot(self.B, other) c = self.C d = np.dot(self.D, other) common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict) def __rmul__(self, other): """Pre-multiply a scalar or matrix (but not StateSpace)""" if not self._check_binop_other(other) or isinstance(other, StateSpace): return NotImplemented # For pre-multiplication only the output gets scaled a = self.A b = self.B c = np.dot(other, self.C) d = np.dot(other, self.D) common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict) def __neg__(self): """Negate the system (equivalent to pre-multiplying by -1).""" return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict) def __add__(self, other): """ Adds two systems in the sense of frequency domain addition. """ if not self._check_binop_other(other): return NotImplemented if isinstance(other, StateSpace): # Disallow mix of discrete and continuous systems. 
if type(other) is not type(self): raise TypeError('Cannot add {} and {}'.format(type(self), type(other))) if self.dt != other.dt: raise TypeError('Cannot add systems with different `dt`.') # Interconnection of systems # x1' = A1 x1 + B1 u # y1 = C1 x1 + D1 u # x2' = A2 x2 + B2 u # y2 = C2 x2 + D2 u # y = y1 + y2 # # Plugging in yields # [x1'] [A1 0 ] [x1] [B1] # [x2'] = [0 A2] [x2] + [B2] u # [x1] # y = [C1 C2] [x2] + [D1 + D2] u a = linalg.block_diag(self.A, other.A) b = np.vstack((self.B, other.B)) c = np.hstack((self.C, other.C)) d = self.D + other.D else: other = np.atleast_2d(other) if self.D.shape == other.shape: # A scalar/matrix is really just a static system (A=0, B=0, C=0) a = self.A b = self.B c = self.C d = self.D + other else: raise ValueError("Cannot add systems with incompatible " "dimensions ({} and {})" .format(self.D.shape, other.shape)) common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype) return StateSpace(np.asarray(a, dtype=common_dtype), np.asarray(b, dtype=common_dtype), np.asarray(c, dtype=common_dtype), np.asarray(d, dtype=common_dtype), **self._dt_dict) def __sub__(self, other): if not self._check_binop_other(other): return NotImplemented return self.__add__(-other) def __radd__(self, other): if not self._check_binop_other(other): return NotImplemented return self.__add__(other) def __rsub__(self, other): if not self._check_binop_other(other): return NotImplemented return (-self).__add__(other) def __truediv__(self, other): """ Divide by a scalar """ # Division by non-StateSpace scalars if not self._check_binop_other(other) or isinstance(other, StateSpace): return NotImplemented if isinstance(other, np.ndarray) and other.ndim > 0: # It's ambiguous what this means, so disallow it raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays") return self.__mul__(1/other) @property def A(self): """State matrix of the `StateSpace` system.""" return self._A @A.setter def A(self, A): self._A = _atleast_2d_or_none(A) @property def B(self): """Input matrix of the `StateSpace` system.""" return self._B @B.setter def B(self, B): self._B = _atleast_2d_or_none(B) self.inputs = self.B.shape[-1] @property def C(self): """Output matrix of the `StateSpace` system.""" return self._C @C.setter def C(self, C): self._C = _atleast_2d_or_none(C) self.outputs = self.C.shape[0] @property def D(self): """Feedthrough matrix of the `StateSpace` system.""" return self._D @D.setter def D(self, D): self._D = _atleast_2d_or_none(D) def _copy(self, system): """ Copy the parameters of another `StateSpace` system. Parameters ---------- system : instance of `StateSpace` The state-space system that is to be copied """ self.A = system.A self.B = system.B self.C = system.C self.D = system.D def to_tf(self, **kwargs): """ Convert system representation to `TransferFunction`. Parameters ---------- kwargs : dict, optional Additional keywords passed to `ss2zpk` Returns ------- sys : instance of `TransferFunction` Transfer function of the current system """ return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict) def to_zpk(self, **kwargs): """ Convert system representation to `ZerosPolesGain`. Parameters ---------- kwargs : dict, optional Additional keywords passed to `ss2zpk` Returns ------- sys : instance of `ZerosPolesGain` Zeros, poles, gain representation of the current system """ return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict) def to_ss(self): """ Return a copy of the current `StateSpace` system. 
Returns ------- sys : instance of `StateSpace` The current system (copy) """ return copy.deepcopy(self) class StateSpaceContinuous(StateSpace, lti): r""" Continuous-time Linear Time Invariant system in state-space form. Represents the system as the continuous-time, first order differential equation :math:`\dot{x} = A x + B u`. Continuous-time `StateSpace` systems inherit additional functionality from the `lti` class. Parameters ---------- *system: arguments The `StateSpace` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 4: array_like: (A, B, C, D) See Also -------- TransferFunction, ZerosPolesGain, lti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `StateSpace` system representation (such as `zeros` or `poles`) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> a = np.array([[0, 1], [0, 0]]) >>> b = np.array([[0], [1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> sys = signal.StateSpace(a, b, c, d) >>> print(sys) StateSpaceContinuous( array([[0, 1], [0, 0]]), array([[0], [1]]), array([[1, 0]]), array([[0]]), dt: None ) """ def to_discrete(self, dt, method='zoh', alpha=None): """ Returns the discretized `StateSpace` system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` and `StateSpace` """ return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), dt, method=method, alpha=alpha)[:-1], dt=dt) class StateSpaceDiscrete(StateSpace, dlti): r""" Discrete-time Linear Time Invariant system in state-space form. Represents the system as the discrete-time difference equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems inherit additional functionality from the `dlti` class. Parameters ---------- *system: arguments The `StateSpace` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 4: array_like: (A, B, C, D) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `True` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, ZerosPolesGain, dlti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `StateSpace` system representation (such as `zeros` or `poles`) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> a = np.array([[1, 0.1], [0, 1]]) >>> b = np.array([[0.005], [0.1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> signal.StateSpace(a, b, c, d, dt=0.1) StateSpaceDiscrete( array([[ 1. , 0.1], [ 0. , 1. ]]), array([[ 0.005], [ 0.1 ]]), array([[1, 0]]), array([[0]]), dt: 0.1 ) """ pass def lsim2(system, U=None, T=None, X0=None, **kwargs): """ Simulate output of a continuous-time linear system, by using the ODE solver `scipy.integrate.odeint`. .. 
deprecated:: 1.11.0 Function `lsim2` is deprecated in favor of the faster `lsim` function. `lsim2` will be removed in SciPy 1.13. Parameters ---------- system : an instance of the `lti` class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) U : array_like (1D or 2D), optional An input array describing the input at each time T. Linear interpolation is used between given times. If there are multiple inputs, then each column of the rank-2 array represents an input. If U is not given, the input is assumed to be zero. T : array_like (1D or 2D), optional The time steps at which the input is defined and at which the output is desired. The default is 101 evenly spaced points on the interval [0,10.0]. X0 : array_like (1D), optional The initial condition of the state vector. If `X0` is not given, the initial conditions are assumed to be 0. kwargs : dict Additional keyword arguments are passed on to the function `odeint`. See the notes below for more details. Returns ------- T : 1D ndarray The time values for the output. yout : ndarray The response of the system. xout : ndarray The time-evolution of the state-vector. See Also -------- lsim Notes ----- This function uses `scipy.integrate.odeint` to solve the system's differential equations. Additional keyword arguments given to `lsim2` are passed on to `scipy.integrate.odeint`. See the documentation for `scipy.integrate.odeint` for the full list of arguments. As `lsim2` is now deprecated, users are advised to switch to the faster and more accurate `lsim` function. Keyword arguments for `scipy.integrate.odeint` are not supported in `lsim`, but not needed in general. If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- We'll use `lsim2` to simulate an analog Bessel filter applied to a signal. >>> import numpy as np >>> from scipy.signal import bessel, lsim2 >>> import matplotlib.pyplot as plt Create a low-pass Bessel filter with a cutoff of 12 Hz. >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True) Generate data to which the filter is applied. >>> t = np.linspace(0, 1.25, 500, endpoint=False) The input signal is the sum of three sinusoidal curves, with frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal. >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) + ... 0.5*np.cos(2*np.pi*80*t)) Simulate the filter with `lsim2`. >>> tout, yout, xout = lsim2((b, a), U=u, T=t) Plot the result. >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input') >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output') >>> plt.legend(loc='best', shadow=True, framealpha=1) >>> plt.grid(alpha=0.3) >>> plt.xlabel('t') >>> plt.show() In a second example, we simulate a double integrator ``y'' = u``, with a constant input ``u = 1``. We'll use the state space representation of the integrator. >>> from scipy.signal import lti >>> A = np.array([[0, 1], [0, 0]]) >>> B = np.array([[0], [1]]) >>> C = np.array([[1, 0]]) >>> D = 0 >>> system = lti(A, B, C, D) `t` and `u` define the time and input signal for the system to be simulated. >>> t = np.linspace(0, 5, num=50) >>> u = np.ones_like(t) Compute the simulation, and then plot `y`. 
As expected, the plot shows the curve ``y = 0.5*t**2``. >>> tout, y, x = lsim2(system, u, t) >>> plt.plot(t, y) >>> plt.grid(alpha=0.3) >>> plt.xlabel('t') >>> plt.show() """ warnings.warn("lsim2 is deprecated and will be removed from scipy 1.13. " "Use the feature-equivalent lsim function.", DeprecationWarning, stacklevel=2) if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('lsim2 can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() if X0 is None: X0 = zeros(sys.B.shape[0], sys.A.dtype) if T is None: # XXX T should really be a required argument, but U was # changed from a required positional argument to a keyword, # and T is after U in the argument list. So we either: change # the API and move T in front of U; check here for T being # None and raise an exception; or assign a default value to T # here. This code implements the latter. T = linspace(0, 10.0, 101) T = atleast_1d(T) if len(T.shape) != 1: raise ValueError("T must be a rank-1 array.") if U is not None: U = atleast_1d(U) if len(U.shape) == 1: U = U.reshape(-1, 1) sU = U.shape if sU[0] != len(T): raise ValueError("U must have the same number of rows " "as elements in T.") if sU[1] != sys.inputs: raise ValueError("The number of inputs in U (%d) is not " "compatible with the number of system " "inputs (%d)" % (sU[1], sys.inputs)) # Create a callable that uses linear interpolation to # calculate the input at any time. ufunc = interpolate.interp1d(T, U, kind='linear', axis=0, bounds_error=False) def fprime(x, t, sys, ufunc): """The vector field of the linear system.""" return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc(t)))) xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs) yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U)) else: def fprime(x, t, sys): """The vector field of the linear system.""" return dot(sys.A, x) xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs) yout = dot(sys.C, transpose(xout)) return T, squeeze(transpose(yout)), xout def _cast_to_array_dtype(in1, in2): """Cast array to dtype of other array, while avoiding ComplexWarning. Those can be raised when casting complex to real. """ if numpy.issubdtype(in2.dtype, numpy.float64): # dtype to cast to is not complex, so use .real in1 = in1.real.astype(in2.dtype) else: in1 = in1.astype(in2.dtype) return in1 def lsim(system, U, T, X0=None, interp=True): """ Simulate output of a continuous-time linear system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) U : array_like An input array describing the input at each time `T` (interpolation is assumed between given times). If there are multiple inputs, then each column of the rank-2 array represents an input. If U = 0 or None, a zero input is used. T : array_like The time steps at which the input is defined and at which the output is desired. Must be nonnegative, increasing, and equally spaced. X0 : array_like, optional The initial conditions on the state vector (zero by default). interp : bool, optional Whether to use linear (True, the default) or zero-order-hold (False) interpolation for the input array. Returns ------- T : 1D ndarray Time values for the output. yout : 1D ndarray System response. xout : ndarray Time evolution of the state vector. 
Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- We'll use `lsim` to simulate an analog Bessel filter applied to a signal. >>> import numpy as np >>> from scipy.signal import bessel, lsim >>> import matplotlib.pyplot as plt Create a low-pass Bessel filter with a cutoff of 12 Hz. >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True) Generate data to which the filter is applied. >>> t = np.linspace(0, 1.25, 500, endpoint=False) The input signal is the sum of three sinusoidal curves, with frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal. >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) + ... 0.5*np.cos(2*np.pi*80*t)) Simulate the filter with `lsim`. >>> tout, yout, xout = lsim((b, a), U=u, T=t) Plot the result. >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input') >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output') >>> plt.legend(loc='best', shadow=True, framealpha=1) >>> plt.grid(alpha=0.3) >>> plt.xlabel('t') >>> plt.show() In a second example, we simulate a double integrator ``y'' = u``, with a constant input ``u = 1``. We'll use the state space representation of the integrator. >>> from scipy.signal import lti >>> A = np.array([[0.0, 1.0], [0.0, 0.0]]) >>> B = np.array([[0.0], [1.0]]) >>> C = np.array([[1.0, 0.0]]) >>> D = 0.0 >>> system = lti(A, B, C, D) `t` and `u` define the time and input signal for the system to be simulated. >>> t = np.linspace(0, 5, num=50) >>> u = np.ones_like(t) Compute the simulation, and then plot `y`. As expected, the plot shows the curve ``y = 0.5*t**2``. >>> tout, y, x = lsim(system, u, t) >>> plt.plot(t, y) >>> plt.grid(alpha=0.3) >>> plt.xlabel('t') >>> plt.show() """ if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('lsim can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() T = atleast_1d(T) if len(T.shape) != 1: raise ValueError("T must be a rank-1 array.") A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D)) n_states = A.shape[0] n_inputs = B.shape[1] n_steps = T.size if X0 is None: X0 = zeros(n_states, sys.A.dtype) xout = np.empty((n_steps, n_states), sys.A.dtype) if T[0] == 0: xout[0] = X0 elif T[0] > 0: # step forward to initial time, with zero input xout[0] = dot(X0, linalg.expm(transpose(A) * T[0])) else: raise ValueError("Initial time must be nonnegative") no_input = (U is None or (isinstance(U, (int, float)) and U == 0.) 
or not np.any(U)) if n_steps == 1: yout = squeeze(xout @ C.T) if not no_input: yout += squeeze(U @ D.T) return T, yout, squeeze(xout) dt = T[1] - T[0] if not np.allclose(np.diff(T), dt): raise ValueError("Time steps are not equally spaced.") if no_input: # Zero input: just use matrix exponential # take transpose because state is a row vector expAT_dt = linalg.expm(A.T * dt) for i in range(1, n_steps): xout[i] = xout[i-1] @ expAT_dt yout = squeeze(xout @ C.T) return T, yout, squeeze(xout) # Nonzero input U = atleast_1d(U) if U.ndim == 1: U = U[:, np.newaxis] if U.shape[0] != n_steps: raise ValueError("U must have the same number of rows " "as elements in T.") if U.shape[1] != n_inputs: raise ValueError("System does not define that many inputs.") if not interp: # Zero-order hold # Algorithm: to integrate from time 0 to time dt, we solve # xdot = A x + B u, x(0) = x0 # udot = 0, u(0) = u0. # # Solution is # [ x(dt) ] [ A*dt B*dt ] [ x0 ] # [ u(dt) ] = exp [ 0 0 ] [ u0 ] M = np.vstack([np.hstack([A * dt, B * dt]), np.zeros((n_inputs, n_states + n_inputs))]) # transpose everything because the state and input are row vectors expMT = linalg.expm(M.T) Ad = expMT[:n_states, :n_states] Bd = expMT[n_states:, :n_states] for i in range(1, n_steps): xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd else: # Linear interpolation between steps # Algorithm: to integrate from time 0 to time dt, with linear # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve # xdot = A x + B u, x(0) = x0 # udot = (u1 - u0) / dt, u(0) = u0. # # Solution is # [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ] # [ u(dt) ] = exp [ 0 0 I ] [ u0 ] # [u1 - u0] [ 0 0 0 ] [u1 - u0] M = np.vstack([np.hstack([A * dt, B * dt, np.zeros((n_states, n_inputs))]), np.hstack([np.zeros((n_inputs, n_states + n_inputs)), np.identity(n_inputs)]), np.zeros((n_inputs, n_states + 2 * n_inputs))]) expMT = linalg.expm(M.T) Ad = expMT[:n_states, :n_states] Bd1 = expMT[n_states+n_inputs:, :n_states] Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1 for i in range(1, n_steps): xout[i] = xout[i-1] @ Ad + U[i-1] @ Bd0 + U[i] @ Bd1 yout = squeeze(xout @ C.T) + squeeze(U @ D.T) return T, yout, squeeze(xout) def _default_response_times(A, n): """Compute a reasonable set of time samples for the response time. This function is used by `impulse`, `impulse2`, `step` and `step2` to compute the response time when the `T` argument to the function is None. Parameters ---------- A : array_like The system matrix, which is square. n : int The number of time samples to generate. Returns ------- t : ndarray The 1-D array of length `n` of time samples at which the response is to be computed. """ # Create a reasonable time interval. # TODO: This could use some more work. # For example, what is expected when the system is unstable? vals = linalg.eigvals(A) r = min(abs(real(vals))) if r == 0.0: r = 1.0 tc = 1.0 / r t = linspace(0.0, 7 * tc, n) return t def impulse(system, X0=None, T=None, N=None): """Impulse response of continuous-time system. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : array_like, optional Initial state-vector. Defaults to zero. T : array_like, optional Time points. Computed if not given. N : int, optional The number of time points to compute (if `T` is not given). Returns ------- T : ndarray A 1-D array of time points. 
yout : ndarray A 1-D array containing the impulse response of the system (except for singularities at zero). Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- Compute the impulse response of a second order system with a repeated root: ``x''(t) + 2*x'(t) + x(t) = u(t)`` >>> from scipy import signal >>> system = ([1.0], [1.0, 2.0, 1.0]) >>> t, y = signal.impulse(system) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y) """ if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('impulse can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() if X0 is None: X = squeeze(sys.B) else: X = squeeze(sys.B + X0) if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) else: T = asarray(T) _, h, _ = lsim(sys, 0., T, X, interp=False) return T, h def impulse2(system, X0=None, T=None, N=None, **kwargs): """ Impulse response of a single-input, continuous-time linear system. .. deprecated:: 1.11.0 Function `impulse2` is deprecated in favor of the faster `impulse` function. `impulse2` will be removed in SciPy 1.13. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : 1-D array_like, optional The initial condition of the state vector. Default: 0 (the zero vector). T : 1-D array_like, optional The time steps at which the input is defined and at which the output is desired. If `T` is not given, the function will generate a set of time samples automatically. N : int, optional Number of time points to compute. Default: 100. kwargs : various types Additional keyword arguments are passed on to the function `scipy.signal.lsim2`, which in turn passes them on to `scipy.integrate.odeint`; see the latter's documentation for information about these arguments. Returns ------- T : ndarray The time values for the output. yout : ndarray The output response of the system. See Also -------- impulse, lsim2, scipy.integrate.odeint Notes ----- The solution is generated by calling `scipy.signal.lsim2`, which uses the differential equation solver `scipy.integrate.odeint`. As `impulse2` is now deprecated, users are advised to switch to the faster and more accurate `impulse` function. Keyword arguments for `scipy.integrate.odeint` are not supported in `impulse`, but not needed in general. If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.8.0 Examples -------- Compute the impulse response of a second order system with a repeated root: ``x''(t) + 2*x'(t) + x(t) = u(t)`` >>> from scipy import signal >>> system = ([1.0], [1.0, 2.0, 1.0]) >>> t, y = signal.impulse2(system) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y) """ warnings.warn("impulse2 is deprecated and will be removed from " "scipy 1.13. 
Use the feature-equivalent impulse function.", DeprecationWarning, stacklevel=2) if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('impulse2 can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() B = sys.B if B.shape[-1] != 1: raise ValueError("impulse2() requires a single-input system.") B = B.squeeze() if X0 is None: X0 = zeros_like(B) if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) # Move the impulse in the input to the initial conditions, and then # solve using lsim2(). ic = B + X0 with suppress_warnings() as sup: sup.filter(DeprecationWarning, "lsim2 is deprecated and will be removed from scipy 1.13. " "Use the feature-equivalent lsim function.") Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs) return Tr, Yr def step(system, X0=None, T=None, N=None): """Step response of continuous-time system. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : array_like, optional Initial state-vector (default is zero). T : array_like, optional Time points (computed if not given). N : int, optional Number of time points to compute if `T` is not given. Returns ------- T : 1D ndarray Output time points. yout : 1D ndarray Step response of system. Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> lti = signal.lti([1.0], [1.0, 1.0]) >>> t, y = signal.step(lti) >>> plt.plot(t, y) >>> plt.xlabel('Time [s]') >>> plt.ylabel('Amplitude') >>> plt.title('Step response for 1. Order Lowpass') >>> plt.grid() """ if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('step can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) else: T = asarray(T) U = ones(T.shape, sys.A.dtype) vals = lsim(sys, U, T, X0=X0, interp=False) return vals[0], vals[1] def step2(system, X0=None, T=None, N=None, **kwargs): """Step response of continuous-time system. This function is functionally the same as `scipy.signal.step`, but it uses the function `scipy.signal.lsim2` to compute the step response. .. deprecated:: 1.11.0 Function `step2` is deprecated in favor of the faster `step` function. `step2` will be removed in SciPy 1.13. Parameters ---------- system : an instance of the LTI class or a tuple of array_like describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) X0 : array_like, optional Initial state-vector (default is zero). T : array_like, optional Time points (computed if not given). N : int, optional Number of time points to compute if `T` is not given. kwargs : various types Additional keyword arguments are passed on the function `scipy.signal.lsim2`, which in turn passes them on to `scipy.integrate.odeint`. See the documentation for `scipy.integrate.odeint` for information about these arguments. Returns ------- T : 1D ndarray Output time points. 
yout : 1D ndarray Step response of system. See Also -------- scipy.signal.step Notes ----- As `step2` is now deprecated, users are advised to switch to the faster and more accurate `step` function. Keyword arguments for `scipy.integrate.odeint` are not supported in `step`, but not needed in general. If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.8.0 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> lti = signal.lti([1.0], [1.0, 1.0]) >>> t, y = signal.step2(lti) >>> plt.plot(t, y) >>> plt.xlabel('Time [s]') >>> plt.ylabel('Amplitude') >>> plt.title('Step response for 1. Order Lowpass') >>> plt.grid() """ warnings.warn("step2 is deprecated and will be removed from scipy 1.13. " "Use the feature-equivalent step function.", DeprecationWarning, stacklevel=2) if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('step2 can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() if N is None: N = 100 if T is None: T = _default_response_times(sys.A, N) else: T = asarray(T) U = ones(T.shape, sys.A.dtype) with suppress_warnings() as sup: sup.filter(DeprecationWarning, "lsim2 is deprecated and will be removed from scipy 1.13. " "Use the feature-equivalent lsim function.") vals = lsim2(sys, U, T, X0=X0, **kwargs) return vals[0], vals[1] def bode(system, w=None, n=100): """ Calculate Bode magnitude and phase data of a continuous-time system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) w : array_like, optional Array of frequencies (in rad/s). Magnitude and phase data is calculated for every value in this array. If not given a reasonable set will be calculated. n : int, optional Number of frequency points to compute if `w` is not given. The `n` frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. Returns ------- w : 1D ndarray Frequency array [rad/s] mag : 1D ndarray Magnitude array [dB] phase : 1D ndarray Phase array [deg] Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.11.0 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> sys = signal.TransferFunction([1], [1, 1]) >>> w, mag, phase = signal.bode(sys) >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ w, y = freqresp(system, w=w, n=n) mag = 20.0 * numpy.log10(abs(y)) phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi return w, mag, phase def freqresp(system, w=None, n=10000): r"""Calculate the frequency response of a continuous-time system. Parameters ---------- system : an instance of the `lti` class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `lti`) * 2 (num, den) * 3 (zeros, poles, gain) * 4 (A, B, C, D) w : array_like, optional Array of frequencies (in rad/s). 
Magnitude and phase data is calculated for every value in this array. If not given, a reasonable set will be calculated. n : int, optional Number of frequency points to compute if `w` is not given. The `n` frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. Returns ------- w : 1D ndarray Frequency array [rad/s] H : 1D ndarray Array of complex magnitude values Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- Generating the Nyquist plot of a transfer function >>> from scipy import signal >>> import matplotlib.pyplot as plt Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`: >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5]) >>> w, H = signal.freqresp(s1) >>> plt.figure() >>> plt.plot(H.real, H.imag, "b") >>> plt.plot(H.real, -H.imag, "r") >>> plt.show() """ if isinstance(system, lti): if isinstance(system, (TransferFunction, ZerosPolesGain)): sys = system else: sys = system._as_zpk() elif isinstance(system, dlti): raise AttributeError('freqresp can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_zpk() if sys.inputs != 1 or sys.outputs != 1: raise ValueError("freqresp() requires a SISO (single input, single " "output) system.") if w is not None: worN = w else: worN = n if isinstance(sys, TransferFunction): # In the call to freqs(), sys.num.ravel() is used because there are # cases where sys.num is a 2-D array with a single row. w, h = freqs(sys.num.ravel(), sys.den, worN=worN) elif isinstance(sys, ZerosPolesGain): w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN) return w, h # This class will be used by place_poles to return its results # see https://code.activestate.com/recipes/52308/ class Bunch: def __init__(self, **kwds): self.__dict__.update(kwds) def _valid_inputs(A, B, poles, method, rtol, maxiter): """ Check the poles come in complex conjugage pairs Check shapes of A, B and poles are compatible. 
Check the method chosen is compatible with provided poles Return update method to use and ordered poles """ poles = np.asarray(poles) if poles.ndim > 1: raise ValueError("Poles must be a 1D array like.") # Will raise ValueError if poles do not come in complex conjugates pairs poles = _order_complex_poles(poles) if A.ndim > 2: raise ValueError("A must be a 2D array/matrix.") if B.ndim > 2: raise ValueError("B must be a 2D array/matrix") if A.shape[0] != A.shape[1]: raise ValueError("A must be square") if len(poles) > A.shape[0]: raise ValueError("maximum number of poles is %d but you asked for %d" % (A.shape[0], len(poles))) if len(poles) < A.shape[0]: raise ValueError("number of poles is %d but you should provide %d" % (len(poles), A.shape[0])) r = np.linalg.matrix_rank(B) for p in poles: if sum(p == poles) > r: raise ValueError("at least one of the requested pole is repeated " "more than rank(B) times") # Choose update method update_loop = _YT_loop if method not in ('KNV0','YT'): raise ValueError("The method keyword must be one of 'YT' or 'KNV0'") if method == "KNV0": update_loop = _KNV0_loop if not all(np.isreal(poles)): raise ValueError("Complex poles are not supported by KNV0") if maxiter < 1: raise ValueError("maxiter must be at least equal to 1") # We do not check rtol <= 0 as the user can use a negative rtol to # force maxiter iterations if rtol > 1: raise ValueError("rtol can not be greater than 1") return update_loop, poles def _order_complex_poles(poles): """ Check we have complex conjugates pairs and reorder P according to YT, ie real_poles, complex_i, conjugate complex_i, .... The lexicographic sort on the complex poles is added to help the user to compare sets of poles. """ ordered_poles = np.sort(poles[np.isreal(poles)]) im_poles = [] for p in np.sort(poles[np.imag(poles) < 0]): if np.conj(p) in poles: im_poles.extend((p, np.conj(p))) ordered_poles = np.hstack((ordered_poles, im_poles)) if poles.shape[0] != len(ordered_poles): raise ValueError("Complex poles must come with their conjugates") return ordered_poles def _KNV0(B, ker_pole, transfer_matrix, j, poles): """ Algorithm "KNV0" Kautsky et Al. Robust pole assignment in linear state feedback, Int journal of Control 1985, vol 41 p 1129->1155 https://la.epfl.ch/files/content/sites/la/files/ users/105941/public/KautskyNicholsDooren """ # Remove xj form the base transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1) # If we QR this matrix in full mode Q=Q0|Q1 # then Q1 will be a single column orthogonnal to # Q0, that's what we are looking for ! # After merge of gh-4249 great speed improvements could be achieved # using QR updates instead of full QR in the line below # To debug with numpy qr uncomment the line below # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete") Q, R = s_qr(transfer_matrix_not_j, mode="full") mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T) yj = np.dot(mat_ker_pj, Q[:, -1]) # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its # projection into ker_pole[j] will yield a vector # close to 0. As we are looking for a vector in ker_pole[j] # simply stick with transfer_matrix[:, j] (unless someone provides me with # a better choice ?) 
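    # Note: ker_pole[j] comes from the Q factor of a QR decomposition, so
    # its columns are orthonormal and dot(ker_pole[j], ker_pole[j].T) is the
    # orthogonal projector onto that null space; yj is simply the component
    # of Q[:, -1] lying inside ker_pole[j].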
if not np.allclose(yj, 0): xj = yj/np.linalg.norm(yj) transfer_matrix[:, j] = xj # KNV does not support complex poles, using YT technique the two lines # below seem to work 9 out of 10 times but it is not reliable enough: # transfer_matrix[:, j]=real(xj) # transfer_matrix[:, j+1]=imag(xj) # Add this at the beginning of this function if you wish to test # complex support: # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): # return # Problems arise when imag(xj)=>0 I have no idea on how to fix this def _YT_real(ker_pole, Q, transfer_matrix, i, j): """ Applies algorithm from YT section 6.1 page 19 related to real pairs """ # step 1 page 19 u = Q[:, -2, np.newaxis] v = Q[:, -1, np.newaxis] # step 2 page 19 m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - np.dot(v, u.T)), ker_pole[j]) # step 3 page 19 um, sm, vm = np.linalg.svd(m) # mu1, mu2 two first columns of U => 2 first lines of U.T mu1, mu2 = um.T[:2, :, np.newaxis] # VM is V.T with numpy we want the first two lines of V.T nu1, nu2 = vm[:2, :, np.newaxis] # what follows is a rough python translation of the formulas # in section 6.2 page 20 (step 4) transfer_matrix_j_mo_transfer_matrix_j = np.vstack(( transfer_matrix[:, i, np.newaxis], transfer_matrix[:, j, np.newaxis])) if not np.allclose(sm[0], sm[1]): ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1) ker_pole_i_nu1 = np.dot(ker_pole[j], nu1) ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) else: ker_pole_ij = np.vstack(( np.hstack((ker_pole[i], np.zeros(ker_pole[i].shape))), np.hstack((np.zeros(ker_pole[j].shape), ker_pole[j])) )) mu_nu_matrix = np.vstack( (np.hstack((mu1, mu2)), np.hstack((nu1, nu2))) ) ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix) transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T), transfer_matrix_j_mo_transfer_matrix_j) if not np.allclose(transfer_matrix_ij, 0): transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij / np.linalg.norm(transfer_matrix_ij)) transfer_matrix[:, i] = transfer_matrix_ij[ :transfer_matrix[:, i].shape[0], 0 ] transfer_matrix[:, j] = transfer_matrix_ij[ transfer_matrix[:, i].shape[0]:, 0 ] else: # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to # ker_pole_mu_nu and iterate. As we are looking for a vector in # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help # (that's a guess, not a claim !) 
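        # ker_pole_mu_nu stacks the candidate replacement for column i on
        # top of the one for column j, so split it back below: top half ->
        # column i, bottom half -> column j (only the first column is used
        # when two columns were kept above).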
transfer_matrix[:, i] = ker_pole_mu_nu[ :transfer_matrix[:, i].shape[0], 0 ] transfer_matrix[:, j] = ker_pole_mu_nu[ transfer_matrix[:, i].shape[0]:, 0 ] def _YT_complex(ker_pole, Q, transfer_matrix, i, j): """ Applies algorithm from YT section 6.2 page 20 related to complex pairs """ # step 1 page 20 ur = np.sqrt(2)*Q[:, -2, np.newaxis] ui = np.sqrt(2)*Q[:, -1, np.newaxis] u = ur + 1j*ui # step 2 page 20 ker_pole_ij = ker_pole[i] m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - np.dot(np.conj(u), u.T)), ker_pole_ij) # step 3 page 20 e_val, e_vec = np.linalg.eig(m) # sort eigenvalues according to their module e_val_idx = np.argsort(np.abs(e_val)) mu1 = e_vec[:, e_val_idx[-1], np.newaxis] mu2 = e_vec[:, e_val_idx[-2], np.newaxis] # what follows is a rough python translation of the formulas # in section 6.2 page 20 (step 4) # remember transfer_matrix_i has been split as # transfer_matrix[i]=real(transfer_matrix_i) and # transfer_matrix[j]=imag(transfer_matrix_i) transfer_matrix_j_mo_transfer_matrix_j = ( transfer_matrix[:, i, np.newaxis] + 1j*transfer_matrix[:, j, np.newaxis] ) if not np.allclose(np.abs(e_val[e_val_idx[-1]]), np.abs(e_val[e_val_idx[-2]])): ker_pole_mu = np.dot(ker_pole_ij, mu1) else: mu1_mu2_matrix = np.hstack((mu1, mu2)) ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix) transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)), transfer_matrix_j_mo_transfer_matrix_j) if not np.allclose(transfer_matrix_i_j, 0): transfer_matrix_i_j = (transfer_matrix_i_j / np.linalg.norm(transfer_matrix_i_j)) transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0]) transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0]) else: # same idea as in YT_real transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0]) transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0]) def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): """ Algorithm "YT" Tits, Yang. Globally Convergent Algorithms for Robust Pole Assignment by State Feedback https://hdl.handle.net/1903/5598 The poles P have to be sorted accordingly to section 6.2 page 20 """ # The IEEE edition of the YT paper gives useful information on the # optimal update order for the real poles in order to minimize the number # of times we have to loop over all poles, see page 1442 nb_real = poles[np.isreal(poles)].shape[0] # hnb => Half Nb Real hnb = nb_real // 2 # Stick to the indices in the paper and then remove one to get numpy array # index it is a bit easier to link the code to the paper this way even if it # is not very clean. 
The paper is unclear about what should be done when # there is only one real pole => use KNV0 on this real pole seem to work if nb_real > 0: #update the biggest real pole with the smallest one update_order = [[nb_real], [1]] else: update_order = [[],[]] r_comp = np.arange(nb_real+1, len(poles)+1, 2) # step 1.a r_p = np.arange(1, hnb+nb_real % 2) update_order[0].extend(2*r_p) update_order[1].extend(2*r_p+1) # step 1.b update_order[0].extend(r_comp) update_order[1].extend(r_comp+1) # step 1.c r_p = np.arange(1, hnb+1) update_order[0].extend(2*r_p-1) update_order[1].extend(2*r_p) # step 1.d if hnb == 0 and np.isreal(poles[0]): update_order[0].append(1) update_order[1].append(1) update_order[0].extend(r_comp) update_order[1].extend(r_comp+1) # step 2.a r_j = np.arange(2, hnb+nb_real % 2) for j in r_j: for i in range(1, hnb+1): update_order[0].append(i) update_order[1].append(i+j) # step 2.b if hnb == 0 and np.isreal(poles[0]): update_order[0].append(1) update_order[1].append(1) update_order[0].extend(r_comp) update_order[1].extend(r_comp+1) # step 2.c r_j = np.arange(2, hnb+nb_real % 2) for j in r_j: for i in range(hnb+1, nb_real+1): idx_1 = i+j if idx_1 > nb_real: idx_1 = i+j-nb_real update_order[0].append(i) update_order[1].append(idx_1) # step 2.d if hnb == 0 and np.isreal(poles[0]): update_order[0].append(1) update_order[1].append(1) update_order[0].extend(r_comp) update_order[1].extend(r_comp+1) # step 3.a for i in range(1, hnb+1): update_order[0].append(i) update_order[1].append(i+hnb) # step 3.b if hnb == 0 and np.isreal(poles[0]): update_order[0].append(1) update_order[1].append(1) update_order[0].extend(r_comp) update_order[1].extend(r_comp+1) update_order = np.array(update_order).T-1 stop = False nb_try = 0 while nb_try < maxiter and not stop: det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) for i, j in update_order: if i == j: assert i == 0, "i!=0 for KNV call in YT" assert np.isreal(poles[i]), "calling KNV on a complex pole" _KNV0(B, ker_pole, transfer_matrix, i, poles) else: transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j), axis=1) # after merge of gh-4249 great speed improvements could be # achieved using QR updates instead of full QR in the line below #to debug with numpy qr uncomment the line below #Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete") Q, _ = s_qr(transfer_matrix_not_i_j, mode="full") if np.isreal(poles[i]): assert np.isreal(poles[j]), "mixing real and complex " + \ "in YT_real" + str(poles) _YT_real(ker_pole, Q, transfer_matrix, i, j) else: assert ~np.isreal(poles[i]), "mixing real and complex " + \ "in YT_real" + str(poles) _YT_complex(ker_pole, Q, transfer_matrix, i, j) det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), np.abs(np.linalg.det(transfer_matrix)))) cur_rtol = np.abs( (det_transfer_matrix - det_transfer_matrixb) / det_transfer_matrix) if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): # Convergence test from YT page 21 stop = True nb_try += 1 return stop, cur_rtol, nb_try def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol): """ Loop over all poles one by one and apply KNV method 0 algorithm """ # This method is useful only because we need to be able to call # _KNV0 from YT without looping over all poles, otherwise it would # have been fine to mix _KNV0_loop and _KNV0 in a single function stop = False nb_try = 0 while nb_try < maxiter and not stop: det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix)) for j in range(B.shape[0]): _KNV0(B, ker_pole, transfer_matrix, j, poles) 
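        # One full sweep over the columns is done; gauge progress through
        # |det(transfer_matrix)|, clipped at sqrt(eps) so the relative
        # change computed below cannot divide by zero.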
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), np.abs(np.linalg.det(transfer_matrix)))) cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) / det_transfer_matrix) if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)): # Convergence test from YT page 21 stop = True nb_try += 1 return stop, cur_rtol, nb_try def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30): """ Compute K such that eigenvalues (A - dot(B, K))=poles. K is the gain matrix such as the plant described by the linear system ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``, as close as possible to those asked for in poles. SISO, MISO and MIMO systems are supported. Parameters ---------- A, B : ndarray State-space representation of linear system ``AX + BU``. poles : array_like Desired real poles and/or complex conjugates poles. Complex poles are only supported with ``method="YT"`` (default). method: {'YT', 'KNV0'}, optional Which method to choose to find the gain matrix K. One of: - 'YT': Yang Tits - 'KNV0': Kautsky, Nichols, Van Dooren update method 0 See References and Notes for details on the algorithms. rtol: float, optional After each iteration the determinant of the eigenvectors of ``A - B*K`` is compared to its previous value, when the relative error between these two values becomes lower than `rtol` the algorithm stops. Default is 1e-3. maxiter: int, optional Maximum number of iterations to compute the gain matrix. Default is 30. Returns ------- full_state_feedback : Bunch object full_state_feedback is composed of: gain_matrix : 1-D ndarray The closed loop matrix K such as the eigenvalues of ``A-BK`` are as close as possible to the requested poles. computed_poles : 1-D ndarray The poles corresponding to ``A-BK`` sorted as first the real poles in increasing order, then the complex congugates in lexicographic order. requested_poles : 1-D ndarray The poles the algorithm was asked to place sorted as above, they may differ from what was achieved. X : 2-D ndarray The transfer matrix such as ``X * diag(poles) = (A - B*K)*X`` (see Notes) rtol : float The relative tolerance achieved on ``det(X)`` (see Notes). `rtol` will be NaN if it is possible to solve the system ``diag(poles) = (A - B*K)``, or 0 when the optimization algorithms can't do anything i.e when ``B.shape[1] == 1``. nb_iter : int The number of iterations performed before converging. `nb_iter` will be NaN if it is possible to solve the system ``diag(poles) = (A - B*K)``, or 0 when the optimization algorithms can't do anything i.e when ``B.shape[1] == 1``. Notes ----- The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses rank-2 updates. This yields on average more robust solutions (see [2]_ pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV does not in its original version. Only update method 0 proposed by KNV has been implemented here, hence the name ``'KNV0'``. KNV extended to complex poles is used in Matlab's ``place`` function, YT is distributed under a non-free licence by Slicot under the name ``robpole``. It is unclear and undocumented how KNV0 has been extended to complex poles (Tits and Yang claim on page 14 of their paper that their method can not be used to extend KNV to complex poles), therefore only YT supports them in this implementation. 
As the solution to the problem of pole placement is not unique for MIMO systems, both methods start with a tentative transfer matrix which is altered in various way to increase its determinant. Both methods have been proven to converge to a stable solution, however depending on the way the initial transfer matrix is chosen they will converge to different solutions and therefore there is absolutely no guarantee that using ``'KNV0'`` will yield results similar to Matlab's or any other implementation of these algorithms. Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'`` is only provided because it is needed by ``'YT'`` in some specific cases. Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'`` when ``abs(det(X))`` is used as a robustness indicator. [2]_ is available as a technical report on the following URL: https://hdl.handle.net/1903/5598 References ---------- .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment in linear state feedback", International Journal of Control, Vol. 41 pp. 1129-1155, 1985. .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust pole assignment by state feedback", IEEE Transactions on Automatic Control, Vol. 41, pp. 1432-1452, 1996. Examples -------- A simple example demonstrating real pole placement using both KNV and YT algorithms. This is example number 1 from section 4 of the reference KNV publication ([1]_): >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ], ... [-0.5814, -4.290, 0, 0.6750 ], ... [ 1.067, 4.273, -6.654, 5.893 ], ... [ 0.0480, 4.273, 1.343, -2.104 ]]) >>> B = np.array([[ 0, 5.679 ], ... [ 1.136, 1.136 ], ... [ 0, 0, ], ... [-3.146, 0 ]]) >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659]) Now compute K with KNV method 0, with the default YT method and with the YT method while forcing 100 iterations of the algorithm and print some results after each call. >>> fsf1 = signal.place_poles(A, B, P, method='KNV0') >>> fsf1.gain_matrix array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785], [ 0.50587268, 0.57779091, 0.51795763, -0.41991442]]) >>> fsf2 = signal.place_poles(A, B, P) # uses YT method >>> fsf2.computed_poles array([-8.6659, -5.0566, -0.5 , -0.2 ]) >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100) >>> fsf3.X array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j], [-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j], [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j], [ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]]) The absolute value of the determinant of X is a good indicator to check the robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing it. Below a comparison of the robustness of the results above: >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X)) True >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X)) True Now a simple example for complex poles: >>> A = np.array([[ 0, 7/3., 0, 0 ], ... [ 0, 0, 0, 7/9. ], ... [ 0, 0, 0, 0 ], ... [ 0, 0, 0, 0 ]]) >>> B = np.array([[ 0, 0 ], ... [ 0, 0 ], ... [ 1, 0 ], ... [ 0, 1 ]]) >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3. >>> fsf = signal.place_poles(A, B, P, method='YT') We can plot the desired and computed poles in the complex plane: >>> t = np.linspace(0, 2*np.pi, 401) >>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag, ... 
'wo', label='Desired') >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx', ... label='Placed') >>> plt.grid() >>> plt.axis('image') >>> plt.axis([-1.1, 1.1, -1.1, 1.1]) >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1) """ # Move away all the inputs checking, it only adds noise to the code update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter) # The current value of the relative tolerance we achieved cur_rtol = 0 # The number of iterations needed before converging nb_iter = 0 # Step A: QR decomposition of B page 1132 KN # to debug with numpy qr uncomment the line below # u, z = np.linalg.qr(B, mode="complete") u, z = s_qr(B, mode="full") rankB = np.linalg.matrix_rank(B) u0 = u[:, :rankB] u1 = u[:, rankB:] z = z[:rankB, :] # If we can use the identity matrix as X the solution is obvious if B.shape[0] == rankB: # if B is square and full rank there is only one solution # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]) # i.e K=inv(B)*(diag(P)-A) # if B has as many lines as its rank (but not square) there are many # solutions and we can choose one using least squares # => use lstsq in both cases. # In both cases the transfer matrix X will be eye(A.shape[0]) and I # can hardly think of a better one so there is nothing to optimize # # for complex poles we use the following trick # # |a -b| has for eigenvalues a+b and a-b # |b a| # # |a+bi 0| has the obvious eigenvalues a+bi and a-bi # |0 a-bi| # # e.g solving the first one in R gives the solution # for the second one in C diag_poles = np.zeros(A.shape) idx = 0 while idx < poles.shape[0]: p = poles[idx] diag_poles[idx, idx] = np.real(p) if ~np.isreal(p): diag_poles[idx, idx+1] = -np.imag(p) diag_poles[idx+1, idx+1] = np.real(p) diag_poles[idx+1, idx] = np.imag(p) idx += 1 # skip next one idx += 1 gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0] transfer_matrix = np.eye(A.shape[0]) cur_rtol = np.nan nb_iter = np.nan else: # step A (p1144 KNV) and beginning of step F: decompose # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors # in the same loop ker_pole = [] # flag to skip the conjugate of a complex pole skip_conjugate = False # select orthonormal base ker_pole for each Pole and vectors for # transfer_matrix for j in range(B.shape[0]): if skip_conjugate: skip_conjugate = False continue pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T # after QR Q=Q0|Q1 # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix. # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in # R when using mode "complete". In default mode Q1 and the zeros # in R are not computed # To debug with numpy qr uncomment the line below # Q, _ = np.linalg.qr(pole_space_j, mode="complete") Q, _ = s_qr(pole_space_j, mode="full") ker_pole_j = Q[:, pole_space_j.shape[1]:] # We want to select one vector in ker_pole_j to build the transfer # matrix, however qr returns sometimes vectors with zeros on the # same line for each pole and this yields very long convergence # times. # Or some other times a set of vectors, one with zero imaginary # part and one (or several) with imaginary parts. After trying # many ways to select the best possible one (eg ditch vectors # with zero imaginary part for complex poles) I ended up summing # all vectors in ker_pole_j, this solves 100% of the problems and # is a valid choice for transfer_matrix. 
# This way for complex poles we are sure to have a non zero # imaginary part that way, and the problem of lines full of zeros # in transfer_matrix is solved too as when a vector from # ker_pole_j has a zero the other one(s) when # ker_pole_j.shape[1]>1) for sure won't have a zero there. transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis] transfer_matrix_j = (transfer_matrix_j / np.linalg.norm(transfer_matrix_j)) if ~np.isreal(poles[j]): # complex pole transfer_matrix_j = np.hstack([np.real(transfer_matrix_j), np.imag(transfer_matrix_j)]) ker_pole.extend([ker_pole_j, ker_pole_j]) # Skip next pole as it is the conjugate skip_conjugate = True else: # real pole, nothing to do ker_pole.append(ker_pole_j) if j == 0: transfer_matrix = transfer_matrix_j else: transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j)) if rankB > 1: # otherwise there is nothing we can optimize stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol) if not stop and rtol > 0: # if rtol<=0 the user has probably done that on purpose, # don't annoy him err_msg = ( "Convergence was not reached after maxiter iterations.\n" f"You asked for a tolerance of {rtol}, we got {cur_rtol}." ) warnings.warn(err_msg, stacklevel=2) # reconstruct transfer_matrix to match complex conjugate pairs, # ie transfer_matrix_j/transfer_matrix_j+1 are # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after transfer_matrix = transfer_matrix.astype(complex) idx = 0 while idx < poles.shape[0]-1: if ~np.isreal(poles[idx]): rel = transfer_matrix[:, idx].copy() img = transfer_matrix[:, idx+1] # rel will be an array referencing a column of transfer_matrix # if we don't copy() it will changer after the next line and # and the line after will not yield the correct value transfer_matrix[:, idx] = rel-1j*img transfer_matrix[:, idx+1] = rel+1j*img idx += 1 # skip next one idx += 1 try: m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles), transfer_matrix.T)).T gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A)) except np.linalg.LinAlgError as e: raise ValueError("The poles you've chosen can't be placed. " "Check the controllability matrix and try " "another set of poles") from e # Beware: Kautsky solves A+BK but the usual form is A-BK gain_matrix = -gain_matrix # K still contains complex with ~=0j imaginary parts, get rid of them gain_matrix = np.real(gain_matrix) full_state_feedback = Bunch() full_state_feedback.gain_matrix = gain_matrix full_state_feedback.computed_poles = _order_complex_poles( np.linalg.eig(A - np.dot(B, gain_matrix))[0] ) full_state_feedback.requested_poles = poles full_state_feedback.X = transfer_matrix full_state_feedback.rtol = cur_rtol full_state_feedback.nb_iter = nb_iter return full_state_feedback def dlsim(system, u, t=None, x0=None): """ Simulate output of a discrete-time linear system. Parameters ---------- system : tuple of array_like or instance of `dlti` A tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `dlti`) * 3: (num, den, dt) * 4: (zeros, poles, gain, dt) * 5: (A, B, C, D, dt) u : array_like An input array describing the input at each time `t` (interpolation is assumed between given times). If there are multiple inputs, then each column of the rank-2 array represents an input. t : array_like, optional The time steps at which the input is defined. 
If `t` is given, it must be the same length as `u`, and the final value in `t` determines the number of steps returned in the output. x0 : array_like, optional The initial conditions on the state vector (zero by default). Returns ------- tout : ndarray Time values for the output, as a 1-D array. yout : ndarray System response, as a 1-D array. xout : ndarray, optional Time-evolution of the state-vector. Only generated if the input is a `StateSpace` system. See Also -------- lsim, dstep, dimpulse, cont2discrete Examples -------- A simple integrator transfer function with a discrete time step of 1.0 could be implemented as: >>> import numpy as np >>> from scipy import signal >>> tf = ([1.0,], [1.0, -1.0], 1.0) >>> t_in = [0.0, 1.0, 2.0, 3.0] >>> u = np.asarray([0.0, 0.0, 1.0, 1.0]) >>> t_out, y = signal.dlsim(tf, u, t=t_in) >>> y.T array([[ 0., 0., 0., 1.]]) """ # Convert system to dlti-StateSpace if isinstance(system, lti): raise AttributeError('dlsim can only be used with discrete-time dlti ' 'systems.') elif not isinstance(system, dlti): system = dlti(*system[:-1], dt=system[-1]) # Condition needed to ensure output remains compatible is_ss_input = isinstance(system, StateSpace) system = system._as_ss() u = np.atleast_1d(u) if u.ndim == 1: u = np.atleast_2d(u).T if t is None: out_samples = len(u) stoptime = (out_samples - 1) * system.dt else: stoptime = t[-1] out_samples = int(np.floor(stoptime / system.dt)) + 1 # Pre-build output arrays xout = np.zeros((out_samples, system.A.shape[0])) yout = np.zeros((out_samples, system.C.shape[0])) tout = np.linspace(0.0, stoptime, num=out_samples) # Check initial condition if x0 is None: xout[0, :] = np.zeros((system.A.shape[1],)) else: xout[0, :] = np.asarray(x0) # Pre-interpolate inputs into the desired time steps if t is None: u_dt = u else: if len(u.shape) == 1: u = u[:, np.newaxis] u_dt = make_interp_spline(t, u, k=1)(tout) # Simulate the system for i in range(0, out_samples - 1): xout[i+1, :] = (np.dot(system.A, xout[i, :]) + np.dot(system.B, u_dt[i, :])) yout[i, :] = (np.dot(system.C, xout[i, :]) + np.dot(system.D, u_dt[i, :])) # Last point yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) + np.dot(system.D, u_dt[out_samples-1, :])) if is_ss_input: return tout, yout, xout else: return tout, yout def dimpulse(system, x0=None, t=None, n=None): """ Impulse response of discrete-time system. Parameters ---------- system : tuple of array_like or instance of `dlti` A tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `dlti`) * 3: (num, den, dt) * 4: (zeros, poles, gain, dt) * 5: (A, B, C, D, dt) x0 : array_like, optional Initial state-vector. Defaults to zero. t : array_like, optional Time points. Computed if not given. n : int, optional The number of time points to compute (if `t` is not given). Returns ------- tout : ndarray Time values for the output, as a 1-D array. yout : tuple of ndarray Impulse response of system. Each element of the tuple represents the output of the system based on an impulse in each input. 
See Also -------- impulse, dstep, dlsim, cont2discrete Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> butter = signal.dlti(*signal.butter(3, 0.5)) >>> t, y = signal.dimpulse(butter, n=25) >>> plt.step(t, np.squeeze(y)) >>> plt.grid() >>> plt.xlabel('n [samples]') >>> plt.ylabel('Amplitude') """ # Convert system to dlti-StateSpace if isinstance(system, dlti): system = system._as_ss() elif isinstance(system, lti): raise AttributeError('dimpulse can only be used with discrete-time ' 'dlti systems.') else: system = dlti(*system[:-1], dt=system[-1])._as_ss() # Default to 100 samples if unspecified if n is None: n = 100 # If time is not specified, use the number of samples # and system dt if t is None: t = np.linspace(0, n * system.dt, n, endpoint=False) else: t = np.asarray(t) # For each input, implement a step change yout = None for i in range(0, system.inputs): u = np.zeros((t.shape[0], system.inputs)) u[0, i] = 1.0 one_output = dlsim(system, u, t=t, x0=x0) if yout is None: yout = (one_output[1],) else: yout = yout + (one_output[1],) tout = one_output[0] return tout, yout def dstep(system, x0=None, t=None, n=None): """ Step response of discrete-time system. Parameters ---------- system : tuple of array_like A tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `dlti`) * 3: (num, den, dt) * 4: (zeros, poles, gain, dt) * 5: (A, B, C, D, dt) x0 : array_like, optional Initial state-vector. Defaults to zero. t : array_like, optional Time points. Computed if not given. n : int, optional The number of time points to compute (if `t` is not given). Returns ------- tout : ndarray Output time points, as a 1-D array. yout : tuple of ndarray Step response of system. Each element of the tuple represents the output of the system based on a step response to each input. See Also -------- step, dimpulse, dlsim, cont2discrete Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> butter = signal.dlti(*signal.butter(3, 0.5)) >>> t, y = signal.dstep(butter, n=25) >>> plt.step(t, np.squeeze(y)) >>> plt.grid() >>> plt.xlabel('n [samples]') >>> plt.ylabel('Amplitude') """ # Convert system to dlti-StateSpace if isinstance(system, dlti): system = system._as_ss() elif isinstance(system, lti): raise AttributeError('dstep can only be used with discrete-time dlti ' 'systems.') else: system = dlti(*system[:-1], dt=system[-1])._as_ss() # Default to 100 samples if unspecified if n is None: n = 100 # If time is not specified, use the number of samples # and system dt if t is None: t = np.linspace(0, n * system.dt, n, endpoint=False) else: t = np.asarray(t) # For each input, implement a step change yout = None for i in range(0, system.inputs): u = np.zeros((t.shape[0], system.inputs)) u[:, i] = np.ones((t.shape[0],)) one_output = dlsim(system, u, t=t, x0=x0) if yout is None: yout = (one_output[1],) else: yout = yout + (one_output[1],) tout = one_output[0] return tout, yout def dfreqresp(system, w=None, n=10000, whole=False): r""" Calculate the frequency response of a discrete-time system. Parameters ---------- system : an instance of the `dlti` class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `dlti`) * 2 (numerator, denominator, dt) * 3 (zeros, poles, gain, dt) * 4 (A, B, C, D, dt) w : array_like, optional Array of frequencies (in radians/sample). 
Magnitude and phase data is calculated for every value in this array. If not given a reasonable set will be calculated. n : int, optional Number of frequency points to compute if `w` is not given. The `n` frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. whole : bool, optional Normally, if 'w' is not given, frequencies are computed from 0 to the Nyquist frequency, pi radians/sample (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to 2*pi radians/sample. Returns ------- w : 1D ndarray Frequency array [radians/sample] H : 1D ndarray Array of complex magnitude values Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.18.0 Examples -------- Generating the Nyquist plot of a transfer function >>> from scipy import signal >>> import matplotlib.pyplot as plt Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05 seconds: >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) >>> w, H = signal.dfreqresp(sys) >>> plt.figure() >>> plt.plot(H.real, H.imag, "b") >>> plt.plot(H.real, -H.imag, "r") >>> plt.show() """ if not isinstance(system, dlti): if isinstance(system, lti): raise AttributeError('dfreqresp can only be used with ' 'discrete-time systems.') system = dlti(*system[:-1], dt=system[-1]) if isinstance(system, StateSpace): # No SS->ZPK code exists right now, just SS->TF->ZPK system = system._as_tf() if not isinstance(system, (TransferFunction, ZerosPolesGain)): raise ValueError('Unknown system type') if system.inputs != 1 or system.outputs != 1: raise ValueError("dfreqresp requires a SISO (single input, single " "output) system.") if w is not None: worN = w else: worN = n if isinstance(system, TransferFunction): # Convert numerator and denominator from polynomials in the variable # 'z' to polynomials in the variable 'z^-1', as freqz expects. num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den) w, h = freqz(num, den, worN=worN, whole=whole) elif isinstance(system, ZerosPolesGain): w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN, whole=whole) return w, h def dbode(system, w=None, n=100): r""" Calculate Bode magnitude and phase data of a discrete-time system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `dlti`) * 2 (num, den, dt) * 3 (zeros, poles, gain, dt) * 4 (A, B, C, D, dt) w : array_like, optional Array of frequencies (in radians/sample). Magnitude and phase data is calculated for every value in this array. If not given a reasonable set will be calculated. n : int, optional Number of frequency points to compute if `w` is not given. The `n` frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. Returns ------- w : 1D ndarray Frequency array [rad/time_unit] mag : 1D ndarray Magnitude array [dB] phase : 1D ndarray Phase array [deg] Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). .. 
versionadded:: 0.18.0 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05 seconds: >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) Equivalent: sys.bode() >>> w, mag, phase = signal.dbode(sys) >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ w, y = dfreqresp(system, w=w, n=n) if isinstance(system, dlti): dt = system.dt else: dt = system[-1] mag = 20.0 * numpy.log10(abs(y)) phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y))) return w / dt, mag, phase
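# NOTE (added illustration, not part of the SciPy sources): `dbode` above is a
# thin wrapper that post-processes `dfreqresp` output -- magnitude in dB,
# unwrapped phase in degrees, and frequency rescaled from rad/sample to
# rad/time-unit via `dt`.  A minimal check of that relationship, using the same
# example transfer function as the docstrings:
import numpy as np
from scipy import signal

sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)

# Raw complex response on the same 100-point grid dbode uses by default.
w, H = signal.dfreqresp(sys, n=100)

mag = 20.0 * np.log10(np.abs(H))
phase = np.rad2deg(np.unwrap(np.angle(H)))

w_ref, mag_ref, phase_ref = signal.dbode(sys)
assert np.allclose(w / sys.dt, w_ref)
assert np.allclose(mag, mag_ref)
assert np.allclose(phase, phase_ref)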
130,684
32.380587
83
py
scipy
scipy-main/scipy/signal/_fir_filter_design.py
"""Functions for FIR filter design.""" from math import ceil, log import operator import warnings import numpy as np from numpy.fft import irfft, fft, ifft from scipy.special import sinc from scipy.linalg import (toeplitz, hankel, solve, LinAlgError, LinAlgWarning, lstsq) from scipy._lib.deprecation import _NoValue from . import _sigtools __all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', 'firwin', 'firwin2', 'remez', 'firls', 'minimum_phase'] def _get_fs(fs, nyq): """ Utility for replacing the argument 'nyq' (with default 1) with 'fs'. """ if nyq is _NoValue and fs is None: fs = 2 elif nyq is not _NoValue: if fs is not None: raise ValueError("Values cannot be given for both 'nyq' and 'fs'.") msg = ("Keyword argument 'nyq' is deprecated in favour of 'fs' and " "will be removed in SciPy 1.14.0.") warnings.warn(msg, DeprecationWarning, stacklevel=3) if nyq is None: fs = 2 else: fs = 2*nyq return fs # Some notes on function parameters: # # `cutoff` and `width` are given as numbers between 0 and 1. These are # relative frequencies, expressed as a fraction of the Nyquist frequency. # For example, if the Nyquist frequency is 2 KHz, then width=0.15 is a width # of 300 Hz. # # The `order` of a FIR filter is one less than the number of taps. # This is a potential source of confusion, so in the following code, # we will always use the number of taps as the parameterization of # the 'size' of the filter. The "number of taps" means the number # of coefficients, which is the same as the length of the impulse # response of the filter. def kaiser_beta(a): """Compute the Kaiser parameter `beta`, given the attenuation `a`. Parameters ---------- a : float The desired attenuation in the stopband and maximum ripple in the passband, in dB. This should be a *positive* number. Returns ------- beta : float The `beta` parameter to be used in the formula for a Kaiser window. References ---------- Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476. Examples -------- Suppose we want to design a lowpass filter, with 65 dB attenuation in the stop band. The Kaiser window parameter to be used in the window method is computed by ``kaiser_beta(65)``: >>> from scipy.signal import kaiser_beta >>> kaiser_beta(65) 6.20426 """ if a > 50: beta = 0.1102 * (a - 8.7) elif a > 21: beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21) else: beta = 0.0 return beta def kaiser_atten(numtaps, width): """Compute the attenuation of a Kaiser FIR filter. Given the number of taps `N` and the transition width `width`, compute the attenuation `a` in dB, given by Kaiser's formula: a = 2.285 * (N - 1) * pi * width + 7.95 Parameters ---------- numtaps : int The number of taps in the FIR filter. width : float The desired width of the transition region between passband and stopband (or, in general, at any discontinuity) for the filter, expressed as a fraction of the Nyquist frequency. Returns ------- a : float The attenuation of the ripple, in dB. See Also -------- kaiserord, kaiser_beta Examples -------- Suppose we want to design a FIR filter using the Kaiser window method that will have 211 taps and a transition width of 9 Hz for a signal that is sampled at 480 Hz. Expressed as a fraction of the Nyquist frequency, the width is 9/(0.5*480) = 0.0375. 
The approximate attenuation (in dB) is computed as follows: >>> from scipy.signal import kaiser_atten >>> kaiser_atten(211, 0.0375) 64.48099630593983 """ a = 2.285 * (numtaps - 1) * np.pi * width + 7.95 return a def kaiserord(ripple, width): """ Determine the filter window parameters for the Kaiser window method. The parameters returned by this function are generally used to create a finite impulse response filter using the window method, with either `firwin` or `firwin2`. Parameters ---------- ripple : float Upper bound for the deviation (in dB) of the magnitude of the filter's frequency response from that of the desired filter (not including frequencies in any transition intervals). That is, if w is the frequency expressed as a fraction of the Nyquist frequency, A(w) is the actual frequency response of the filter and D(w) is the desired frequency response, the design requirement is that:: abs(A(w) - D(w))) < 10**(-ripple/20) for 0 <= w <= 1 and w not in a transition interval. width : float Width of transition region, normalized so that 1 corresponds to pi radians / sample. That is, the frequency is expressed as a fraction of the Nyquist frequency. Returns ------- numtaps : int The length of the Kaiser window. beta : float The beta parameter for the Kaiser window. See Also -------- kaiser_beta, kaiser_atten Notes ----- There are several ways to obtain the Kaiser window: - ``signal.windows.kaiser(numtaps, beta, sym=True)`` - ``signal.get_window(beta, numtaps)`` - ``signal.get_window(('kaiser', beta), numtaps)`` The empirical equations discovered by Kaiser are used. References ---------- Oppenheim, Schafer, "Discrete-Time Signal Processing", pp.475-476. Examples -------- We will use the Kaiser window method to design a lowpass FIR filter for a signal that is sampled at 1000 Hz. We want at least 65 dB rejection in the stop band, and in the pass band the gain should vary no more than 0.5%. We want a cutoff frequency of 175 Hz, with a transition between the pass band and the stop band of 24 Hz. That is, in the band [0, 163], the gain varies no more than 0.5%, and in the band [187, 500], the signal is attenuated by at least 65 dB. >>> import numpy as np >>> from scipy.signal import kaiserord, firwin, freqz >>> import matplotlib.pyplot as plt >>> fs = 1000.0 >>> cutoff = 175 >>> width = 24 The Kaiser method accepts just a single parameter to control the pass band ripple and the stop band rejection, so we use the more restrictive of the two. In this case, the pass band ripple is 0.005, or 46.02 dB, so we will use 65 dB as the design parameter. Use `kaiserord` to determine the length of the filter and the parameter for the Kaiser window. >>> numtaps, beta = kaiserord(65, width/(0.5*fs)) >>> numtaps 167 >>> beta 6.20426 Use `firwin` to create the FIR filter. >>> taps = firwin(numtaps, cutoff, window=('kaiser', beta), ... scale=False, fs=fs) Compute the frequency response of the filter. ``w`` is the array of frequencies, and ``h`` is the corresponding complex array of frequency responses. >>> w, h = freqz(taps, worN=8000) >>> w *= 0.5*fs/np.pi # Convert w to Hz. Compute the deviation of the magnitude of the filter's response from that of the ideal lowpass filter. Values in the transition region are set to ``nan``, so they won't appear in the plot. >>> ideal = w < cutoff # The "ideal" frequency response. >>> deviation = np.abs(np.abs(h) - ideal) >>> deviation[(w > cutoff - 0.5*width) & (w < cutoff + 0.5*width)] = np.nan Plot the deviation. 
A close look at the left end of the stop band shows that the requirement for 65 dB attenuation is violated in the first lobe by about 0.125 dB. This is not unusual for the Kaiser window method. >>> plt.plot(w, 20*np.log10(np.abs(deviation))) >>> plt.xlim(0, 0.5*fs) >>> plt.ylim(-90, -60) >>> plt.grid(alpha=0.25) >>> plt.axhline(-65, color='r', ls='--', alpha=0.3) >>> plt.xlabel('Frequency (Hz)') >>> plt.ylabel('Deviation from ideal (dB)') >>> plt.title('Lowpass Filter Frequency Response') >>> plt.show() """ A = abs(ripple) # in case somebody is confused as to what's meant if A < 8: # Formula for N is not valid in this range. raise ValueError("Requested maximum ripple attenuation %f is too " "small for the Kaiser formula." % A) beta = kaiser_beta(A) # Kaiser's formula (as given in Oppenheim and Schafer) is for the filter # order, so we have to add 1 to get the number of taps. numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1 return int(ceil(numtaps)), beta def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True, scale=True, nyq=_NoValue, fs=None): """ FIR filter design using the window method. This function computes the coefficients of a finite impulse response filter. The filter will have linear phase; it will be Type I if `numtaps` is odd and Type II if `numtaps` is even. Type II filters always have zero response at the Nyquist frequency, so a ValueError exception is raised if firwin is called with `numtaps` even and having a passband whose right end is at the Nyquist frequency. Parameters ---------- numtaps : int Length of the filter (number of coefficients, i.e. the filter order + 1). `numtaps` must be odd if a passband includes the Nyquist frequency. cutoff : float or 1-D array_like Cutoff frequency of filter (expressed in the same units as `fs`) OR an array of cutoff frequencies (that is, band edges). In the latter case, the frequencies in `cutoff` should be positive and monotonically increasing between 0 and `fs/2`. The values 0 and `fs/2` must not be included in `cutoff`. width : float or None, optional If `width` is not None, then assume it is the approximate width of the transition region (expressed in the same units as `fs`) for use in Kaiser FIR filter design. In this case, the `window` argument is ignored. window : string or tuple of string and parameter values, optional Desired window to use. See `scipy.signal.get_window` for a list of windows and required parameters. pass_zero : {True, False, 'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional If True, the gain at the frequency 0 (i.e., the "DC gain") is 1. If False, the DC gain is 0. Can also be a string argument for the desired filter type (equivalent to ``btype`` in IIR design functions). .. versionadded:: 1.3.0 Support for string arguments. scale : bool, optional Set to True to scale the coefficients so that the frequency response is exactly unity at a certain frequency. That frequency is either: - 0 (DC) if the first passband starts at 0 (i.e. pass_zero is True) - `fs/2` (the Nyquist frequency) if the first passband ends at `fs/2` (i.e the filter is a single band highpass filter); center of first passband otherwise nyq : float, optional, deprecated This is the Nyquist frequency. Each frequency in `cutoff` must be between 0 and `nyq`. Default is 1. .. deprecated:: 1.0.0 `firwin` keyword argument `nyq` is deprecated in favour of `fs` and will be removed in SciPy 1.14.0. fs : float, optional The sampling frequency of the signal. Each frequency in `cutoff` must be between 0 and ``fs/2``. Default is 2. 
Returns ------- h : (numtaps,) ndarray Coefficients of length `numtaps` FIR filter. Raises ------ ValueError If any value in `cutoff` is less than or equal to 0 or greater than or equal to ``fs/2``, if the values in `cutoff` are not strictly monotonically increasing, or if `numtaps` is even but a passband includes the Nyquist frequency. See Also -------- firwin2 firls minimum_phase remez Examples -------- Low-pass from 0 to f: >>> from scipy import signal >>> numtaps = 3 >>> f = 0.1 >>> signal.firwin(numtaps, f) array([ 0.06799017, 0.86401967, 0.06799017]) Use a specific window function: >>> signal.firwin(numtaps, f, window='nuttall') array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04]) High-pass ('stop' from 0 to f): >>> signal.firwin(numtaps, f, pass_zero=False) array([-0.00859313, 0.98281375, -0.00859313]) Band-pass: >>> f1, f2 = 0.1, 0.2 >>> signal.firwin(numtaps, [f1, f2], pass_zero=False) array([ 0.06301614, 0.88770441, 0.06301614]) Band-stop: >>> signal.firwin(numtaps, [f1, f2]) array([-0.00801395, 1.0160279 , -0.00801395]) Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]): >>> f3, f4 = 0.3, 0.4 >>> signal.firwin(numtaps, [f1, f2, f3, f4]) array([-0.01376344, 1.02752689, -0.01376344]) Multi-band (passbands are [f1, f2] and [f3,f4]): >>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False) array([ 0.04890915, 0.91284326, 0.04890915]) """ # noqa: E501 # The major enhancements to this function added in November 2010 were # developed by Tom Krauss (see ticket #902). nyq = 0.5 * _get_fs(fs, nyq) cutoff = np.atleast_1d(cutoff) / float(nyq) # Check for invalid input. if cutoff.ndim > 1: raise ValueError("The cutoff argument must be at most " "one-dimensional.") if cutoff.size == 0: raise ValueError("At least one cutoff frequency must be given.") if cutoff.min() <= 0 or cutoff.max() >= 1: raise ValueError("Invalid cutoff frequency: frequencies must be " "greater than 0 and less than fs/2.") if np.any(np.diff(cutoff) <= 0): raise ValueError("Invalid cutoff frequencies: the frequencies " "must be strictly increasing.") if width is not None: # A width was given. Find the beta parameter of the Kaiser window # and set `window`. This overrides the value of `window` passed in. 
atten = kaiser_atten(numtaps, float(width) / nyq) beta = kaiser_beta(atten) window = ('kaiser', beta) if isinstance(pass_zero, str): if pass_zero in ('bandstop', 'lowpass'): if pass_zero == 'lowpass': if cutoff.size != 1: raise ValueError('cutoff must have one element if ' 'pass_zero=="lowpass", got %s' % (cutoff.shape,)) elif cutoff.size <= 1: raise ValueError('cutoff must have at least two elements if ' 'pass_zero=="bandstop", got %s' % (cutoff.shape,)) pass_zero = True elif pass_zero in ('bandpass', 'highpass'): if pass_zero == 'highpass': if cutoff.size != 1: raise ValueError('cutoff must have one element if ' 'pass_zero=="highpass", got %s' % (cutoff.shape,)) elif cutoff.size <= 1: raise ValueError('cutoff must have at least two elements if ' 'pass_zero=="bandpass", got %s' % (cutoff.shape,)) pass_zero = False else: raise ValueError('pass_zero must be True, False, "bandpass", ' '"lowpass", "highpass", or "bandstop", got ' '{}'.format(pass_zero)) pass_zero = bool(operator.index(pass_zero)) # ensure bool-like pass_nyquist = bool(cutoff.size & 1) ^ pass_zero if pass_nyquist and numtaps % 2 == 0: raise ValueError("A filter with an even number of coefficients must " "have zero response at the Nyquist frequency.") # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff # is even, and each pair in cutoff corresponds to passband. cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist)) # `bands` is a 2-D array; each row gives the left and right edges of # a passband. bands = cutoff.reshape(-1, 2) # Build up the coefficients. alpha = 0.5 * (numtaps - 1) m = np.arange(0, numtaps) - alpha h = 0 for left, right in bands: h += right * sinc(right * m) h -= left * sinc(left * m) # Get and apply the window function. from .windows import get_window win = get_window(window, numtaps, fftbins=False) h *= win # Now handle scaling if desired. if scale: # Get the first passband. left, right = bands[0] if left == 0: scale_frequency = 0.0 elif right == 1: scale_frequency = 1.0 else: scale_frequency = 0.5 * (left + right) c = np.cos(np.pi * m * scale_frequency) s = np.sum(h * c) h /= s return h # Original version of firwin2 from scipy ticket #457, submitted by "tash". # # Rewritten by Warren Weckesser, 2010. def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=_NoValue, antisymmetric=False, fs=None): """ FIR filter design using the window method. From the given frequencies `freq` and corresponding gains `gain`, this function constructs an FIR filter with linear phase and (approximately) the given frequency response. Parameters ---------- numtaps : int The number of taps in the FIR filter. `numtaps` must be less than `nfreqs`. freq : array_like, 1-D The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being Nyquist. The Nyquist frequency is half `fs`. The values in `freq` must be nondecreasing. A value can be repeated once to implement a discontinuity. The first value in `freq` must be 0, and the last value must be ``fs/2``. Values 0 and ``fs/2`` must not be repeated. gain : array_like The filter gains at the frequency sampling points. Certain constraints to gain values, depending on the filter type, are applied, see Notes for details. nfreqs : int, optional The size of the interpolation mesh used to construct the filter. For most efficient behavior, this should be a power of 2 plus 1 (e.g, 129, 257, etc). The default is one more than the smallest power of 2 that is not less than `numtaps`. `nfreqs` must be greater than `numtaps`. 
window : string or (string, float) or float, or None, optional Window function to use. Default is "hamming". See `scipy.signal.get_window` for the complete list of possible values. If None, no window function is applied. nyq : float, optional, deprecated This is the Nyquist frequency. Each frequency in `freq` must be between 0 and `nyq`. Default is 1. .. deprecated:: 1.0.0 `firwin2` keyword argument `nyq` is deprecated in favour of `fs` and will be removed in SciPy 1.14.0. antisymmetric : bool, optional Whether resulting impulse response is symmetric/antisymmetric. See Notes for more details. fs : float, optional The sampling frequency of the signal. Each frequency in `cutoff` must be between 0 and ``fs/2``. Default is 2. Returns ------- taps : ndarray The filter coefficients of the FIR filter, as a 1-D array of length `numtaps`. See Also -------- firls firwin minimum_phase remez Notes ----- From the given set of frequencies and gains, the desired response is constructed in the frequency domain. The inverse FFT is applied to the desired response to create the associated convolution kernel, and the first `numtaps` coefficients of this kernel, scaled by `window`, are returned. The FIR filter will have linear phase. The type of filter is determined by the value of 'numtaps` and `antisymmetric` flag. There are four possible combinations: - odd `numtaps`, `antisymmetric` is False, type I filter is produced - even `numtaps`, `antisymmetric` is False, type II filter is produced - odd `numtaps`, `antisymmetric` is True, type III filter is produced - even `numtaps`, `antisymmetric` is True, type IV filter is produced Magnitude response of all but type I filters are subjects to following constraints: - type II -- zero at the Nyquist frequency - type III -- zero at zero and Nyquist frequencies - type IV -- zero at zero frequency .. versionadded:: 0.9.0 References ---------- .. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989). (See, for example, Section 7.4.) .. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital Signal Processing", Ch. 17. 
http://www.dspguide.com/ch17/1.htm Examples -------- A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and that decreases linearly on [0.5, 1.0] from 1 to 0: >>> from scipy import signal >>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) >>> print(taps[72:78]) [-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961] """ nyq = 0.5 * _get_fs(fs, nyq) if len(freq) != len(gain): raise ValueError('freq and gain must be of same length.') if nfreqs is not None and numtaps >= nfreqs: raise ValueError(('ntaps must be less than nfreqs, but firwin2 was ' 'called with ntaps=%d and nfreqs=%s') % (numtaps, nfreqs)) if freq[0] != 0 or freq[-1] != nyq: raise ValueError('freq must start with 0 and end with fs/2.') d = np.diff(freq) if (d < 0).any(): raise ValueError('The values in freq must be nondecreasing.') d2 = d[:-1] + d[1:] if (d2 == 0).any(): raise ValueError('A value in freq must not occur more than twice.') if freq[1] == 0: raise ValueError('Value 0 must not be repeated in freq') if freq[-2] == nyq: raise ValueError('Value fs/2 must not be repeated in freq') if antisymmetric: if numtaps % 2 == 0: ftype = 4 else: ftype = 3 else: if numtaps % 2 == 0: ftype = 2 else: ftype = 1 if ftype == 2 and gain[-1] != 0.0: raise ValueError("A Type II filter must have zero gain at the " "Nyquist frequency.") elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0): raise ValueError("A Type III filter must have zero gain at zero " "and Nyquist frequencies.") elif ftype == 4 and gain[0] != 0.0: raise ValueError("A Type IV filter must have zero gain at zero " "frequency.") if nfreqs is None: nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2))) if (d == 0).any(): # Tweak any repeated values in freq so that interp works. freq = np.array(freq, copy=True) eps = np.finfo(float).eps * nyq for k in range(len(freq) - 1): if freq[k] == freq[k + 1]: freq[k] = freq[k] - eps freq[k + 1] = freq[k + 1] + eps # Check if freq is strictly increasing after tweak d = np.diff(freq) if (d <= 0).any(): raise ValueError("freq cannot contain numbers that are too close " "(within eps * (fs/2): " "{}) to a repeated value".format(eps)) # Linearly interpolate the desired response on a uniform mesh `x`. x = np.linspace(0.0, nyq, nfreqs) fx = np.interp(x, freq, gain) # Adjust the phases of the coefficients so that the first `ntaps` of the # inverse FFT are the desired filter coefficients. shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq) if ftype > 2: shift *= 1j fx2 = fx * shift # Use irfft to compute the inverse FFT. out_full = irfft(fx2) if window is not None: # Create the window to apply to the filter coefficients. from .windows import get_window wind = get_window(window, numtaps, fftbins=False) else: wind = 1 # Keep only the first `numtaps` coefficients in `out`, and multiply by # the window. out = out_full[:numtaps] * wind if ftype == 3: out[out.size // 2] = 0.0 return out def remez(numtaps, bands, desired, weight=None, Hz=_NoValue, type='bandpass', maxiter=25, grid_density=16, fs=None): """ Calculate the minimax optimal filter using the Remez exchange algorithm. Calculate the filter-coefficients for the finite impulse response (FIR) filter whose transfer function minimizes the maximum error between the desired gain and the realized gain in the specified frequency bands using the Remez exchange algorithm. Parameters ---------- numtaps : int The desired number of taps in the filter. The number of taps is the number of terms in the filter, or the filter order plus one. 
bands : array_like A monotonic sequence containing the band edges. All elements must be non-negative and less than half the sampling frequency as given by `fs`. desired : array_like A sequence half the size of bands containing the desired gain in each of the specified bands. weight : array_like, optional A relative weighting to give to each band region. The length of `weight` has to be half the length of `bands`. Hz : scalar, optional, deprecated The sampling frequency in Hz. Default is 1. .. deprecated:: 1.0.0 `remez` keyword argument `Hz` is deprecated in favour of `fs` and will be removed in SciPy 1.14.0. type : {'bandpass', 'differentiator', 'hilbert'}, optional The type of filter: * 'bandpass' : flat response in bands. This is the default. * 'differentiator' : frequency proportional response in bands. * 'hilbert' : filter with odd symmetry, that is, type III (for even order) or type IV (for odd order) linear phase filters. maxiter : int, optional Maximum number of iterations of the algorithm. Default is 25. grid_density : int, optional Grid density. The dense grid used in `remez` is of size ``(numtaps + 1) * grid_density``. Default is 16. fs : float, optional The sampling frequency of the signal. Default is 1. Returns ------- out : ndarray A rank-1 array containing the coefficients of the optimal (in a minimax sense) filter. See Also -------- firls firwin firwin2 minimum_phase References ---------- .. [1] J. H. McClellan and T. W. Parks, "A unified approach to the design of optimum FIR linear phase digital filters", IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973. .. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer Program for Designing Optimum FIR Linear Phase Digital Filters", IEEE Trans. Audio Electroacoust., vol. AU-21, pp. 506-525, 1973. Examples -------- In these examples, `remez` is used to design low-pass, high-pass, band-pass and band-stop filters. The parameters that define each filter are the filter order, the band boundaries, the transition widths of the boundaries, the desired gains in each band, and the sampling frequency. We'll use a sample frequency of 22050 Hz in all the examples. In each example, the desired gain in each band is either 0 (for a stop band) or 1 (for a pass band). `freqz` is used to compute the frequency response of each filter, and the utility function ``plot_response`` defined below is used to plot the response. >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fs = 22050 # Sample rate, Hz >>> def plot_response(w, h, title): ... "Utility function to plot response functions" ... fig = plt.figure() ... ax = fig.add_subplot(111) ... ax.plot(w, 20*np.log10(np.abs(h))) ... ax.set_ylim(-40, 5) ... ax.grid(True) ... ax.set_xlabel('Frequency (Hz)') ... ax.set_ylabel('Gain (dB)') ... ax.set_title(title) The first example is a low-pass filter, with cutoff frequency 8 kHz. The filter length is 325, and the transition width from pass to stop is 100 Hz. >>> cutoff = 8000.0 # Desired cutoff frequency, Hz >>> trans_width = 100 # Width of transition from pass to stop, Hz >>> numtaps = 325 # Size of the FIR filter. >>> taps = signal.remez(numtaps, [0, cutoff, cutoff + trans_width, 0.5*fs], ... 
[1, 0], fs=fs) >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) >>> plot_response(w, h, "Low-pass Filter") >>> plt.show() This example shows a high-pass filter: >>> cutoff = 2000.0 # Desired cutoff frequency, Hz >>> trans_width = 250 # Width of transition from pass to stop, Hz >>> numtaps = 125 # Size of the FIR filter. >>> taps = signal.remez(numtaps, [0, cutoff - trans_width, cutoff, 0.5*fs], ... [0, 1], fs=fs) >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) >>> plot_response(w, h, "High-pass Filter") >>> plt.show() This example shows a band-pass filter with a pass-band from 2 kHz to 5 kHz. The transition width is 260 Hz and the length of the filter is 63, which is smaller than in the other examples: >>> band = [2000, 5000] # Desired pass band, Hz >>> trans_width = 260 # Width of transition from pass to stop, Hz >>> numtaps = 63 # Size of the FIR filter. >>> edges = [0, band[0] - trans_width, band[0], band[1], ... band[1] + trans_width, 0.5*fs] >>> taps = signal.remez(numtaps, edges, [0, 1, 0], fs=fs) >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) >>> plot_response(w, h, "Band-pass Filter") >>> plt.show() The low order leads to higher ripple and less steep transitions. The next example shows a band-stop filter. >>> band = [6000, 8000] # Desired stop band, Hz >>> trans_width = 200 # Width of transition from pass to stop, Hz >>> numtaps = 175 # Size of the FIR filter. >>> edges = [0, band[0] - trans_width, band[0], band[1], ... band[1] + trans_width, 0.5*fs] >>> taps = signal.remez(numtaps, edges, [1, 0, 1], fs=fs) >>> w, h = signal.freqz(taps, [1], worN=2000, fs=fs) >>> plot_response(w, h, "Band-stop Filter") >>> plt.show() """ if Hz is _NoValue and fs is None: fs = 1.0 elif Hz is not _NoValue: if fs is not None: raise ValueError("Values cannot be given for both 'Hz' and 'fs'.") msg = ("'remez' keyword argument 'Hz' is deprecated in favour of 'fs'" " and will be removed in SciPy 1.12.0.") warnings.warn(msg, DeprecationWarning, stacklevel=2) fs = Hz # Convert type try: tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type] except KeyError as e: raise ValueError("Type must be 'bandpass', 'differentiator', " "or 'hilbert'") from e # Convert weight if weight is None: weight = [1] * len(desired) bands = np.asarray(bands).copy() return _sigtools._remez(numtaps, bands, desired, weight, tnum, fs, maxiter, grid_density) def firls(numtaps, bands, desired, weight=None, nyq=_NoValue, fs=None): """ FIR filter design using least-squares error minimization. Calculate the filter coefficients for the linear-phase finite impulse response (FIR) filter which has the best approximation to the desired frequency response described by `bands` and `desired` in the least squares sense (i.e., the integral of the weighted mean-squared error within the specified bands is minimized). Parameters ---------- numtaps : int The number of taps in the FIR filter. `numtaps` must be odd. bands : array_like A monotonic nondecreasing sequence containing the band edges in Hz. All elements must be non-negative and less than or equal to the Nyquist frequency given by `nyq`. The bands are specified as frequency pairs, thus, if using a 1D array, its length must be even, e.g., `np.array([0, 1, 2, 3, 4, 5])`. Alternatively, the bands can be specified as an nx2 sized 2D array, where n is the number of bands, e.g, `np.array([[0, 1], [2, 3], [4, 5]])`. desired : array_like A sequence the same size as `bands` containing the desired gain at the start and end point of each band. 
weight : array_like, optional A relative weighting to give to each band region when solving the least squares problem. `weight` has to be half the size of `bands`. nyq : float, optional, deprecated This is the Nyquist frequency. Each frequency in `bands` must be between 0 and `nyq` (inclusive). Default is 1. .. deprecated:: 1.0.0 `firls` keyword argument `nyq` is deprecated in favour of `fs` and will be removed in SciPy 1.14.0. fs : float, optional The sampling frequency of the signal. Each frequency in `bands` must be between 0 and ``fs/2`` (inclusive). Default is 2. Returns ------- coeffs : ndarray Coefficients of the optimal (in a least squares sense) FIR filter. See Also -------- firwin firwin2 minimum_phase remez Notes ----- This implementation follows the algorithm given in [1]_. As noted there, least squares design has multiple advantages: 1. Optimal in a least-squares sense. 2. Simple, non-iterative method. 3. The general solution can obtained by solving a linear system of equations. 4. Allows the use of a frequency dependent weighting function. This function constructs a Type I linear phase FIR filter, which contains an odd number of `coeffs` satisfying for :math:`n < numtaps`: .. math:: coeffs(n) = coeffs(numtaps - 1 - n) The odd number of coefficients and filter symmetry avoid boundary conditions that could otherwise occur at the Nyquist and 0 frequencies (e.g., for Type II, III, or IV variants). .. versionadded:: 0.18 References ---------- .. [1] Ivan Selesnick, Linear-Phase Fir Filter Design By Least Squares. OpenStax CNX. Aug 9, 2005. http://cnx.org/contents/eb1ecb35-03a9-4610-ba87-41cd771c95f2@7 Examples -------- We want to construct a band-pass filter. Note that the behavior in the frequency ranges between our stop bands and pass bands is unspecified, and thus may overshoot depending on the parameters of our filter: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fig, axs = plt.subplots(2) >>> fs = 10.0 # Hz >>> desired = (0, 0, 1, 1, 0, 0) >>> for bi, bands in enumerate(((0, 1, 2, 3, 4, 5), (0, 1, 2, 4, 4.5, 5))): ... fir_firls = signal.firls(73, bands, desired, fs=fs) ... fir_remez = signal.remez(73, bands, desired[::2], fs=fs) ... fir_firwin2 = signal.firwin2(73, bands, desired, fs=fs) ... hs = list() ... ax = axs[bi] ... for fir in (fir_firls, fir_remez, fir_firwin2): ... freq, response = signal.freqz(fir) ... hs.append(ax.semilogy(0.5*fs*freq/np.pi, np.abs(response))[0]) ... for band, gains in zip(zip(bands[::2], bands[1::2]), ... zip(desired[::2], desired[1::2])): ... ax.semilogy(band, np.maximum(gains, 1e-7), 'k--', linewidth=2) ... if bi == 0: ... ax.legend(hs, ('firls', 'remez', 'firwin2'), ... loc='lower center', frameon=False) ... else: ... ax.set_xlabel('Frequency (Hz)') ... ax.grid(True) ... ax.set(title='Band-pass %d-%d Hz' % bands[2:4], ylabel='Magnitude') ... >>> fig.tight_layout() >>> plt.show() """ # noqa nyq = 0.5 * _get_fs(fs, nyq) numtaps = int(numtaps) if numtaps % 2 == 0 or numtaps < 1: raise ValueError("numtaps must be odd and >= 1") M = (numtaps-1) // 2 # normalize bands 0->1 and make it 2 columns nyq = float(nyq) if nyq <= 0: raise ValueError('nyq must be positive, got %s <= 0.' 
% nyq) bands = np.asarray(bands).flatten() / nyq if len(bands) % 2 != 0: raise ValueError("bands must contain frequency pairs.") if (bands < 0).any() or (bands > 1).any(): raise ValueError("bands must be between 0 and 1 relative to Nyquist") bands.shape = (-1, 2) # check remaining params desired = np.asarray(desired).flatten() if bands.size != desired.size: raise ValueError("desired must have one entry per frequency, got %s " "gains for %s frequencies." % (desired.size, bands.size)) desired.shape = (-1, 2) if (np.diff(bands) <= 0).any() or (np.diff(bands[:, 0]) < 0).any(): raise ValueError("bands must be monotonically nondecreasing and have " "width > 0.") if (bands[:-1, 1] > bands[1:, 0]).any(): raise ValueError("bands must not overlap.") if (desired < 0).any(): raise ValueError("desired must be non-negative.") if weight is None: weight = np.ones(len(desired)) weight = np.asarray(weight).flatten() if len(weight) != len(desired): raise ValueError("weight must be the same size as the number of " "band pairs ({}).".format(len(bands))) if (weight < 0).any(): raise ValueError("weight must be non-negative.") # Set up the linear matrix equation to be solved, Qa = b # We can express Q(k,n) = 0.5 Q1(k,n) + 0.5 Q2(k,n) # where Q1(k,n)=q(k-n) and Q2(k,n)=q(k+n), i.e. a Toeplitz plus Hankel. # We omit the factor of 0.5 above, instead adding it during coefficient # calculation. # We also omit the 1/π from both Q and b equations, as they cancel # during solving. # We have that: # q(n) = 1/π ∫W(ω)cos(nω)dω (over 0->π) # Using our nomalization ω=πf and with a constant weight W over each # interval f1->f2 we get: # q(n) = W∫cos(πnf)df (0->1) = Wf sin(πnf)/πnf # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). n = np.arange(numtaps)[:, np.newaxis, np.newaxis] q = np.dot(np.diff(np.sinc(bands * n) * bands, axis=2)[:, :, 0], weight) # Now we assemble our sum of Toeplitz and Hankel Q1 = toeplitz(q[:M+1]) Q2 = hankel(q[:M+1], q[M:]) Q = Q1 + Q2 # Now for b(n) we have that: # b(n) = 1/π ∫ W(ω)D(ω)cos(nω)dω (over 0->π) # Using our normalization ω=πf and with a constant weight W over each # interval and a linear term for D(ω) we get (over each f1->f2 interval): # b(n) = W ∫ (mf+c)cos(πnf)df # = f(mf+c)sin(πnf)/πnf + mf**2 cos(nπf)/(πnf)**2 # integrated over each f1->f2 pair (i.e., value at f2 - value at f1). n = n[:M + 1] # only need this many coefficients here # Choose m and c such that we are at the start and end weights m = (np.diff(desired, axis=1) / np.diff(bands, axis=1)) c = desired[:, [0]] - bands[:, [0]] * m b = bands * (m*bands + c) * np.sinc(bands * n) # Use L'Hospital's rule here for cos(nπf)/(πnf)**2 @ n=0 b[0] -= m * bands * bands / 2. b[1:] += m * np.cos(n[1:] * np.pi * bands) / (np.pi * n[1:]) ** 2 b = np.dot(np.diff(b, axis=2)[:, :, 0], weight) # Now we can solve the equation try: # try the fast way with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') a = solve(Q, b, assume_a="pos", check_finite=False) for ww in w: if (ww.category == LinAlgWarning and str(ww.message).startswith('Ill-conditioned matrix')): raise LinAlgError(str(ww.message)) except LinAlgError: # in case Q is rank deficient # This is faster than pinvh, even though we don't explicitly use # the symmetry here. gelsy was faster than gelsd and gelss in # some non-exhaustive tests. 
a = lstsq(Q, b, lapack_driver='gelsy')[0] # make coefficients symmetric (linear phase) coeffs = np.hstack((a[:0:-1], 2 * a[0], a[1:])) return coeffs def _dhtm(mag): """Compute the modified 1-D discrete Hilbert transform Parameters ---------- mag : ndarray The magnitude spectrum. Should be 1-D with an even length, and preferably a fast length for FFT/IFFT. """ # Adapted based on code by Niranjan Damera-Venkata, # Brian L. Evans and Shawn R. McCaslin (see refs for `minimum_phase`) sig = np.zeros(len(mag)) # Leave Nyquist and DC at 0, knowing np.abs(fftfreq(N)[midpt]) == 0.5 midpt = len(mag) // 2 sig[1:midpt] = 1 sig[midpt+1:] = -1 # eventually if we want to support complex filters, we will need a # np.abs() on the mag inside the log, and should remove the .real recon = ifft(mag * np.exp(fft(sig * ifft(np.log(mag))))).real return recon def minimum_phase(h, method='homomorphic', n_fft=None): """Convert a linear-phase FIR filter to minimum phase Parameters ---------- h : array Linear-phase FIR filter coefficients. method : {'hilbert', 'homomorphic'} The method to use: 'homomorphic' (default) This method [4]_ [5]_ works best with filters with an odd number of taps, and the resulting minimum phase filter will have a magnitude response that approximates the square root of the original filter's magnitude response. 'hilbert' This method [1]_ is designed to be used with equiripple filters (e.g., from `remez`) with unity or zero gain regions. n_fft : int The number of points to use for the FFT. Should be at least a few times larger than the signal length (see Notes). Returns ------- h_minimum : array The minimum-phase version of the filter, with length ``(length(h) + 1) // 2``. See Also -------- firwin firwin2 remez Notes ----- Both the Hilbert [1]_ or homomorphic [4]_ [5]_ methods require selection of an FFT length to estimate the complex cepstrum of the filter. In the case of the Hilbert method, the deviation from the ideal spectrum ``epsilon`` is related to the number of stopband zeros ``n_stop`` and FFT length ``n_fft`` as:: epsilon = 2. * n_stop / n_fft For example, with 100 stopband zeros and a FFT length of 2048, ``epsilon = 0.0976``. If we conservatively assume that the number of stopband zeros is one less than the filter length, we can take the FFT length to be the next power of 2 that satisfies ``epsilon=0.01`` as:: n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) This gives reasonable results for both the Hilbert and homomorphic methods, and gives the value used when ``n_fft=None``. Alternative implementations exist for creating minimum-phase filters, including zero inversion [2]_ and spectral factorization [3]_ [4]_. For more information, see: http://dspguru.com/dsp/howtos/how-to-design-minimum-phase-fir-filters References ---------- .. [1] N. Damera-Venkata and B. L. Evans, "Optimal design of real and complex minimum phase digital FIR filters," Acoustics, Speech, and Signal Processing, 1999. Proceedings., 1999 IEEE International Conference on, Phoenix, AZ, 1999, pp. 1145-1148 vol.3. :doi:`10.1109/ICASSP.1999.756179` .. [2] X. Chen and T. W. Parks, "Design of optimal minimum phase FIR filters by direct factorization," Signal Processing, vol. 10, no. 4, pp. 369-383, Jun. 1986. .. [3] T. Saramaki, "Finite Impulse Response Filter Design," in Handbook for Digital Signal Processing, chapter 4, New York: Wiley-Interscience, 1993. .. [4] J. S. Lim, Advanced Topics in Signal Processing. Englewood Cliffs, N.J.: Prentice Hall, 1988. .. [5] A. V. Oppenheim, R. W. Schafer, and J. R. 
Buck, "Discrete-Time Signal Processing," 2nd edition. Upper Saddle River, N.J.: Prentice Hall, 1999. Examples -------- Create an optimal linear-phase filter, then convert it to minimum phase: >>> import numpy as np >>> from scipy.signal import remez, minimum_phase, freqz, group_delay >>> import matplotlib.pyplot as plt >>> freq = [0, 0.2, 0.3, 1.0] >>> desired = [1, 0] >>> h_linear = remez(151, freq, desired, fs=2.) Convert it to minimum phase: >>> h_min_hom = minimum_phase(h_linear, method='homomorphic') >>> h_min_hil = minimum_phase(h_linear, method='hilbert') Compare the three filters: >>> fig, axs = plt.subplots(4, figsize=(4, 8)) >>> for h, style, color in zip((h_linear, h_min_hom, h_min_hil), ... ('-', '-', '--'), ('k', 'r', 'c')): ... w, H = freqz(h) ... w, gd = group_delay((h, 1)) ... w /= np.pi ... axs[0].plot(h, color=color, linestyle=style) ... axs[1].plot(w, np.abs(H), color=color, linestyle=style) ... axs[2].plot(w, 20 * np.log10(np.abs(H)), color=color, linestyle=style) ... axs[3].plot(w, gd, color=color, linestyle=style) >>> for ax in axs: ... ax.grid(True, color='0.5') ... ax.fill_between(freq[1:3], *ax.get_ylim(), color='#ffeeaa', zorder=1) >>> axs[0].set(xlim=[0, len(h_linear) - 1], ylabel='Amplitude', xlabel='Samples') >>> axs[1].legend(['Linear', 'Min-Hom', 'Min-Hil'], title='Phase') >>> for ax, ylim in zip(axs[1:], ([0, 1.1], [-150, 10], [-60, 60])): ... ax.set(xlim=[0, 1], ylim=ylim, xlabel='Frequency') >>> axs[1].set(ylabel='Magnitude') >>> axs[2].set(ylabel='Magnitude (dB)') >>> axs[3].set(ylabel='Group delay') >>> plt.tight_layout() """ # noqa h = np.asarray(h) if np.iscomplexobj(h): raise ValueError('Complex filters not supported') if h.ndim != 1 or h.size <= 2: raise ValueError('h must be 1-D and at least 2 samples long') n_half = len(h) // 2 if not np.allclose(h[-n_half:][::-1], h[:n_half]): warnings.warn('h does not appear to by symmetric, conversion may ' 'fail', RuntimeWarning) if not isinstance(method, str) or method not in \ ('homomorphic', 'hilbert',): raise ValueError('method must be "homomorphic" or "hilbert", got %r' % (method,)) if n_fft is None: n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01))) n_fft = int(n_fft) if n_fft < len(h): raise ValueError('n_fft must be at least len(h)==%s' % len(h)) if method == 'hilbert': w = np.arange(n_fft) * (2 * np.pi / n_fft * n_half) H = np.real(fft(h, n_fft) * np.exp(1j * w)) dp = max(H) - 1 ds = 0 - min(H) S = 4. / (np.sqrt(1+dp+ds) + np.sqrt(1-dp+ds)) ** 2 H += ds H *= S H = np.sqrt(H, out=H) H += 1e-10 # ensure that the log does not explode h_minimum = _dhtm(H) else: # method == 'homomorphic' # zero-pad; calculate the DFT h_temp = np.abs(fft(h, n_fft)) # take 0.25*log(|H|**2) = 0.5*log(|H|) h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up np.log(h_temp, out=h_temp) h_temp *= 0.5 # IDFT h_temp = ifft(h_temp).real # multiply pointwise by the homomorphic filter # lmin[n] = 2u[n] - d[n] win = np.zeros(n_fft) win[0] = 1 stop = (len(h) + 1) // 2 win[1:stop] = 2 if len(h) % 2: win[stop] = 1 h_temp *= win h_temp = ifft(np.exp(fft(h_temp))) h_minimum = h_temp.real n_out = n_half + len(h) % 2 return h_minimum[:n_out]
49,160
36.816154
86
py
scipy
scipy-main/scipy/signal/signaltools.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _signaltools __all__ = [ # noqa: F822 'correlate', 'correlation_lags', 'correlate2d', 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue', 'residuez', 'resample', 'resample_poly', 'detrend', 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', 'filtfilt', 'decimate', 'vectorstrength', 'timeit', 'cKDTree', 'dlti', 'upfirdn', 'linalg', 'sp_fft', 'lambertw', 'get_window', 'axis_slice', 'axis_reverse', 'odd_ext', 'even_ext', 'const_ext', 'cheby1', 'firwin' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.signaltools is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.signaltools` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_signaltools, name)
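# NOTE (added sketch, not part of the SciPy sources): the shim above relies on
# the module-level ``__getattr__`` hook (PEP 562).  A standalone, hypothetical
# module showing the same deprecation pattern -- the module and attribute names
# below are made up for illustration:
import warnings
import math as _new_impl   # stand-in for the module holding the real implementations

__all__ = ['sqrt']         # names still exposed from the deprecated location


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        # Unknown names fail loudly instead of being forwarded silently.
        raise AttributeError(
            f"this deprecated shim has no attribute {name!r}")
    # Known names emit a DeprecationWarning and forward to the new module.
    warnings.warn(
        f"Please use `{name}` from the new namespace; this shim is deprecated.",
        category=DeprecationWarning, stacklevel=2)
    return getattr(_new_impl, name)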
1,401
35.894737
76
py
scipy
scipy-main/scipy/signal/_peak_finding.py
""" Functions for identifying peaks in signals. """ import math import numpy as np from scipy.signal._wavelets import cwt, ricker from scipy.stats import scoreatpercentile from ._peak_finding_utils import ( _local_maxima_1d, _select_by_peak_distance, _peak_prominences, _peak_widths ) __all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences', 'peak_widths', 'find_peaks', 'find_peaks_cwt'] def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'): """ Calculate the relative extrema of `data`. Relative extrema are calculated by finding locations where ``comparator(data[n], data[n+1:n+order+1])`` is True. Parameters ---------- data : ndarray Array in which to find the relative extrema. comparator : callable Function to use to compare two data points. Should take two arrays as arguments. axis : int, optional Axis over which to select from `data`. Default is 0. order : int, optional How many points on each side to use for the comparison to consider ``comparator(n,n+x)`` to be True. mode : str, optional How the edges of the vector are treated. 'wrap' (wrap around) or 'clip' (treat overflow as the same as the last (or first) element). Default 'clip'. See numpy.take. Returns ------- extrema : ndarray Boolean array of the same shape as `data` that is True at an extrema, False otherwise. See also -------- argrelmax, argrelmin Examples -------- >>> import numpy as np >>> testdata = np.array([1,2,3,2,1]) >>> _boolrelextrema(testdata, np.greater, axis=0) array([False, False, True, False, False], dtype=bool) """ if (int(order) != order) or (order < 1): raise ValueError('Order must be an int >= 1') datalen = data.shape[axis] locs = np.arange(0, datalen) results = np.ones(data.shape, dtype=bool) main = data.take(locs, axis=axis, mode=mode) for shift in range(1, order + 1): plus = data.take(locs + shift, axis=axis, mode=mode) minus = data.take(locs - shift, axis=axis, mode=mode) results &= comparator(main, plus) results &= comparator(main, minus) if ~results.any(): return results return results def argrelmin(data, axis=0, order=1, mode='clip'): """ Calculate the relative minima of `data`. Parameters ---------- data : ndarray Array in which to find the relative minima. axis : int, optional Axis over which to select from `data`. Default is 0. order : int, optional How many points on each side to use for the comparison to consider ``comparator(n, n+x)`` to be True. mode : str, optional How the edges of the vector are treated. Available options are 'wrap' (wrap around) or 'clip' (treat overflow as the same as the last (or first) element). Default 'clip'. See numpy.take. Returns ------- extrema : tuple of ndarrays Indices of the minima in arrays of integers. ``extrema[k]`` is the array of indices of axis `k` of `data`. Note that the return value is a tuple even when `data` is 1-D. See Also -------- argrelextrema, argrelmax, find_peaks Notes ----- This function uses `argrelextrema` with np.less as comparator. Therefore, it requires a strict inequality on both sides of a value to consider it a minimum. This means flat minima (more than one sample wide) are not detected. In case of 1-D `data` `find_peaks` can be used to detect all local minima, including flat ones, by calling it with negated `data`. .. versionadded:: 0.11.0 Examples -------- >>> import numpy as np >>> from scipy.signal import argrelmin >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelmin(x) (array([1, 5]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... 
>>> argrelmin(y, axis=1) (array([0, 2]), array([2, 1])) """ return argrelextrema(data, np.less, axis, order, mode) def argrelmax(data, axis=0, order=1, mode='clip'): """ Calculate the relative maxima of `data`. Parameters ---------- data : ndarray Array in which to find the relative maxima. axis : int, optional Axis over which to select from `data`. Default is 0. order : int, optional How many points on each side to use for the comparison to consider ``comparator(n, n+x)`` to be True. mode : str, optional How the edges of the vector are treated. Available options are 'wrap' (wrap around) or 'clip' (treat overflow as the same as the last (or first) element). Default 'clip'. See `numpy.take`. Returns ------- extrema : tuple of ndarrays Indices of the maxima in arrays of integers. ``extrema[k]`` is the array of indices of axis `k` of `data`. Note that the return value is a tuple even when `data` is 1-D. See Also -------- argrelextrema, argrelmin, find_peaks Notes ----- This function uses `argrelextrema` with np.greater as comparator. Therefore, it requires a strict inequality on both sides of a value to consider it a maximum. This means flat maxima (more than one sample wide) are not detected. In case of 1-D `data` `find_peaks` can be used to detect all local maxima, including flat ones. .. versionadded:: 0.11.0 Examples -------- >>> import numpy as np >>> from scipy.signal import argrelmax >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelmax(x) (array([3, 6]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... >>> argrelmax(y, axis=1) (array([0]), array([1])) """ return argrelextrema(data, np.greater, axis, order, mode) def argrelextrema(data, comparator, axis=0, order=1, mode='clip'): """ Calculate the relative extrema of `data`. Parameters ---------- data : ndarray Array in which to find the relative extrema. comparator : callable Function to use to compare two data points. Should take two arrays as arguments. axis : int, optional Axis over which to select from `data`. Default is 0. order : int, optional How many points on each side to use for the comparison to consider ``comparator(n, n+x)`` to be True. mode : str, optional How the edges of the vector are treated. 'wrap' (wrap around) or 'clip' (treat overflow as the same as the last (or first) element). Default is 'clip'. See `numpy.take`. Returns ------- extrema : tuple of ndarrays Indices of the maxima in arrays of integers. ``extrema[k]`` is the array of indices of axis `k` of `data`. Note that the return value is a tuple even when `data` is 1-D. See Also -------- argrelmin, argrelmax Notes ----- .. versionadded:: 0.11.0 Examples -------- >>> import numpy as np >>> from scipy.signal import argrelextrema >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelextrema(x, np.greater) (array([3, 6]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... >>> argrelextrema(y, np.less, axis=1) (array([0, 2]), array([2, 1])) """ results = _boolrelextrema(data, comparator, axis, order, mode) return np.nonzero(results) def _arg_x_as_expected(value): """Ensure argument `x` is a 1-D C-contiguous array of dtype('float64'). Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x` compatible with the signature of the wrapped Cython functions. Returns ------- value : ndarray A 1-D C-contiguous array with dtype('float64'). 
""" value = np.asarray(value, order='C', dtype=np.float64) if value.ndim != 1: raise ValueError('`x` must be a 1-D array') return value def _arg_peaks_as_expected(value): """Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp'). Used in `peak_prominences` and `peak_widths` to make `peaks` compatible with the signature of the wrapped Cython functions. Returns ------- value : ndarray A 1-D C-contiguous array with dtype('intp'). """ value = np.asarray(value) if value.size == 0: # Empty arrays default to np.float64 but are valid input value = np.array([], dtype=np.intp) try: # Safely convert to C-contiguous array of type np.intp value = value.astype(np.intp, order='C', casting='safe', subok=False, copy=False) except TypeError as e: raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e if value.ndim != 1: raise ValueError('`peaks` must be a 1-D array') return value def _arg_wlen_as_expected(value): """Ensure argument `wlen` is of type `np.intp` and larger than 1. Used in `peak_prominences` and `peak_widths`. Returns ------- value : np.intp The original `value` rounded up to an integer or -1 if `value` was None. """ if value is None: # _peak_prominences expects an intp; -1 signals that no value was # supplied by the user value = -1 elif 1 < value: # Round up to a positive integer if not np.can_cast(value, np.intp, "safe"): value = math.ceil(value) value = np.intp(value) else: raise ValueError('`wlen` must be larger than 1, was {}' .format(value)) return value def peak_prominences(x, peaks, wlen=None): """ Calculate the prominence of each peak in a signal. The prominence of a peak measures how much a peak stands out from the surrounding baseline of the signal and is defined as the vertical distance between the peak and its lowest contour line. Parameters ---------- x : sequence A signal with peaks. peaks : sequence Indices of peaks in `x`. wlen : int, optional A window length in samples that optionally limits the evaluated area for each peak to a subset of `x`. The peak is always placed in the middle of the window therefore the given length is rounded up to the next odd integer. This parameter can speed up the calculation (see Notes). Returns ------- prominences : ndarray The calculated prominences for each peak in `peaks`. left_bases, right_bases : ndarray The peaks' bases as indices in `x` to the left and right of each peak. The higher base of each pair is a peak's lowest contour line. Raises ------ ValueError If a value in `peaks` is an invalid index for `x`. Warns ----- PeakPropertyWarning For indices in `peaks` that don't point to valid local maxima in `x`, the returned prominence will be 0 and this warning is raised. This also happens if `wlen` is smaller than the plateau size of a peak. Warnings -------- This function may return unexpected results for data containing NaNs. To avoid this, NaNs should either be removed or replaced. See Also -------- find_peaks Find peaks inside a signal based on peak properties. peak_widths Calculate the width of peaks. Notes ----- Strategy to compute a peak's prominence: 1. Extend a horizontal line from the current peak to the left and right until the line either reaches the window border (see `wlen`) or intersects the signal again at the slope of a higher peak. An intersection with a peak of the same height is ignored. 2. On each side find the minimal signal value within the interval defined above. These points are the peak's bases. 3. The higher one of the two bases marks the peak's lowest contour line. 
The prominence can then be calculated as the vertical difference between the peaks height itself and its lowest contour line. Searching for the peak's bases can be slow for large `x` with periodic behavior because large chunks or even the full signal need to be evaluated for the first algorithmic step. This evaluation area can be limited with the parameter `wlen` which restricts the algorithm to a window around the current peak and can shorten the calculation time if the window length is short in relation to `x`. However, this may stop the algorithm from finding the true global contour line if the peak's true bases are outside this window. Instead, a higher contour line is found within the restricted window leading to a smaller calculated prominence. In practice, this is only relevant for the highest set of peaks in `x`. This behavior may even be used intentionally to calculate "local" prominences. .. versionadded:: 1.1.0 References ---------- .. [1] Wikipedia Article for Topographic Prominence: https://en.wikipedia.org/wiki/Topographic_prominence Examples -------- >>> import numpy as np >>> from scipy.signal import find_peaks, peak_prominences >>> import matplotlib.pyplot as plt Create a test signal with two overlayed harmonics >>> x = np.linspace(0, 6 * np.pi, 1000) >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) Find all peaks and calculate prominences >>> peaks, _ = find_peaks(x) >>> prominences = peak_prominences(x, peaks)[0] >>> prominences array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 , 0.47822491, 2.48340261, 0.47822491]) Calculate the height of each peak's contour line and plot the results >>> contour_heights = x[peaks] - prominences >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks]) >>> plt.show() Let's evaluate a second example that demonstrates several edge cases for one peak at index 5. >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0]) >>> peaks = np.array([5]) >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.show() >>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases) (array([3.]), array([2]), array([6])) Note how the peak at index 3 of the same height is not considered as a border while searching for the left base. Instead, two minima at 0 and 2 are found in which case the one closer to the evaluated peak is always chosen. On the right side, however, the base must be placed at 6 because the higher peak represents the right border to the evaluated area. >>> peak_prominences(x, peaks, wlen=3.1) (array([2.]), array([4]), array([6])) Here, we restricted the algorithm to a window from 3 to 7 (the length is 5 samples because `wlen` was rounded up to the next odd integer). Thus, the only two candidates in the evaluated area are the two neighboring samples and a smaller prominence is calculated. """ x = _arg_x_as_expected(x) peaks = _arg_peaks_as_expected(peaks) wlen = _arg_wlen_as_expected(wlen) return _peak_prominences(x, peaks, wlen) def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None): """ Calculate the width of each peak in a signal. This function calculates the width of a peak in samples at a relative distance to the peak's height and prominence. Parameters ---------- x : sequence A signal with peaks. peaks : sequence Indices of peaks in `x`. rel_height : float, optional Chooses the relative height at which the peak width is measured as a percentage of its prominence. 
1.0 calculates the width of the peak at its lowest contour line while 0.5 evaluates at half the prominence height. Must be at least 0. See notes for further explanation. prominence_data : tuple, optional A tuple of three arrays matching the output of `peak_prominences` when called with the same arguments `x` and `peaks`. This data is calculated internally if not provided. wlen : int, optional A window length in samples passed to `peak_prominences` as an optional argument for internal calculation of `prominence_data`. This argument is ignored if `prominence_data` is given. Returns ------- widths : ndarray The widths for each peak in samples. width_heights : ndarray The height of the contour lines at which the `widths` were evaluated. left_ips, right_ips : ndarray Interpolated positions of left and right intersection points of a horizontal line at the respective evaluation height. Raises ------ ValueError If `prominence_data` is supplied but doesn't satisfy the condition ``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak, has the wrong dtype, is not C-contiguous or does not have the same shape. Warns ----- PeakPropertyWarning Raised if any calculated width is 0. This may stem from the supplied `prominence_data` or if `rel_height` is set to 0. Warnings -------- This function may return unexpected results for data containing NaNs. To avoid this, NaNs should either be removed or replaced. See Also -------- find_peaks Find peaks inside a signal based on peak properties. peak_prominences Calculate the prominence of peaks. Notes ----- The basic algorithm to calculate a peak's width is as follows: * Calculate the evaluation height :math:`h_{eval}` with the formula :math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the height of the peak itself, :math:`P` is the peak's prominence and :math:`R` a positive ratio specified with the argument `rel_height`. * Draw a horizontal line at the evaluation height to both sides, starting at the peak's current vertical position until the lines either intersect a slope, the signal border or cross the vertical position of the peak's base (see `peak_prominences` for a definition). For the first case, intersection with the signal, the true intersection point is estimated with linear interpolation. * Calculate the width as the horizontal distance between the chosen endpoints on both sides. As a consequence of this, the maximal possible width for each peak is the horizontal distance between its bases. As shown above, to calculate a peak's width, its prominence and bases must be known. You can supply these yourself with the argument `prominence_data`. Otherwise, they are internally calculated (see `peak_prominences`). .. versionadded:: 1.1.0 Examples -------- >>> import numpy as np >>> from scipy.signal import chirp, find_peaks, peak_widths >>> import matplotlib.pyplot as plt Create a test signal with two overlayed harmonics >>> x = np.linspace(0, 6 * np.pi, 1000) >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x) Find all peaks and calculate their widths at the relative height of 0.5 (contour line at half the prominence height) and 1 (at the lowest contour line at full prominence height). 
>>> peaks, _ = find_peaks(x) >>> results_half = peak_widths(x, peaks, rel_height=0.5) >>> results_half[0] # widths array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081, 35.46729324, 41.30429622, 181.93835853, 45.37078546]) >>> results_full = peak_widths(x, peaks, rel_height=1) >>> results_full[0] # widths array([181.9396084 , 72.99284945, 61.28657872, 373.84622694, 61.78404617, 72.48822812, 253.09161876, 79.36860878]) Plot signal, peaks and contour lines at which the widths were calculated >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.hlines(*results_half[1:], color="C2") >>> plt.hlines(*results_full[1:], color="C3") >>> plt.show() """ x = _arg_x_as_expected(x) peaks = _arg_peaks_as_expected(peaks) if prominence_data is None: # Calculate prominence if not supplied and use wlen if supplied. wlen = _arg_wlen_as_expected(wlen) prominence_data = _peak_prominences(x, peaks, wlen) return _peak_widths(x, peaks, rel_height, *prominence_data) def _unpack_condition_args(interval, x, peaks): """ Parse condition arguments for `find_peaks`. Parameters ---------- interval : number or ndarray or sequence Either a number or ndarray or a 2-element sequence of the former. The first value is always interpreted as `imin` and the second, if supplied, as `imax`. x : ndarray The signal with `peaks`. peaks : ndarray An array with indices used to reduce `imin` and / or `imax` if those are arrays. Returns ------- imin, imax : number or ndarray or None Minimal and maximal value in `argument`. Raises ------ ValueError : If interval border is given as array and its size does not match the size of `x`. Notes ----- .. versionadded:: 1.1.0 """ try: imin, imax = interval except (TypeError, ValueError): imin, imax = (interval, None) # Reduce arrays if arrays if isinstance(imin, np.ndarray): if imin.size != x.size: raise ValueError('array size of lower interval border must match x') imin = imin[peaks] if isinstance(imax, np.ndarray): if imax.size != x.size: raise ValueError('array size of upper interval border must match x') imax = imax[peaks] return imin, imax def _select_by_property(peak_properties, pmin, pmax): """ Evaluate where the generic property of peaks conforms to an interval. Parameters ---------- peak_properties : ndarray An array with properties for each peak. pmin : None or number or ndarray Lower interval boundary for `peak_properties`. ``None`` is interpreted as an open border. pmax : None or number or ndarray Upper interval boundary for `peak_properties`. ``None`` is interpreted as an open border. Returns ------- keep : bool A boolean mask evaluating to true where `peak_properties` conforms to the interval. See Also -------- find_peaks Notes ----- .. versionadded:: 1.1.0 """ keep = np.ones(peak_properties.size, dtype=bool) if pmin is not None: keep &= (pmin <= peak_properties) if pmax is not None: keep &= (peak_properties <= pmax) return keep def _select_by_peak_threshold(x, peaks, tmin, tmax): """ Evaluate which peaks fulfill the threshold condition. Parameters ---------- x : ndarray A 1-D array which is indexable by `peaks`. peaks : ndarray Indices of peaks in `x`. tmin, tmax : scalar or ndarray or None Minimal and / or maximal required thresholds. If supplied as ndarrays their size must match `peaks`. ``None`` is interpreted as an open border. Returns ------- keep : bool A boolean mask evaluating to true where `peaks` fulfill the threshold condition. left_thresholds, right_thresholds : ndarray Array matching `peak` containing the thresholds of each peak on both sides. Notes ----- .. 
versionadded:: 1.1.0 """ # Stack thresholds on both sides to make min / max operations easier: # tmin is compared with the smaller, and tmax with the greater thresold to # each peak's side stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1], x[peaks] - x[peaks + 1]]) keep = np.ones(peaks.size, dtype=bool) if tmin is not None: min_thresholds = np.min(stacked_thresholds, axis=0) keep &= (tmin <= min_thresholds) if tmax is not None: max_thresholds = np.max(stacked_thresholds, axis=0) keep &= (max_thresholds <= tmax) return keep, stacked_thresholds[0], stacked_thresholds[1] def find_peaks(x, height=None, threshold=None, distance=None, prominence=None, width=None, wlen=None, rel_height=0.5, plateau_size=None): """ Find peaks inside a signal based on peak properties. This function takes a 1-D array and finds all local maxima by simple comparison of neighboring values. Optionally, a subset of these peaks can be selected by specifying conditions for a peak's properties. Parameters ---------- x : sequence A signal with peaks. height : number or ndarray or sequence, optional Required height of peaks. Either a number, ``None``, an array matching `x` or a 2-element sequence of the former. The first element is always interpreted as the minimal and the second, if supplied, as the maximal required height. threshold : number or ndarray or sequence, optional Required threshold of peaks, the vertical distance to its neighboring samples. Either a number, ``None``, an array matching `x` or a 2-element sequence of the former. The first element is always interpreted as the minimal and the second, if supplied, as the maximal required threshold. distance : number, optional Required minimal horizontal distance (>= 1) in samples between neighbouring peaks. Smaller peaks are removed first until the condition is fulfilled for all remaining peaks. prominence : number or ndarray or sequence, optional Required prominence of peaks. Either a number, ``None``, an array matching `x` or a 2-element sequence of the former. The first element is always interpreted as the minimal and the second, if supplied, as the maximal required prominence. width : number or ndarray or sequence, optional Required width of peaks in samples. Either a number, ``None``, an array matching `x` or a 2-element sequence of the former. The first element is always interpreted as the minimal and the second, if supplied, as the maximal required width. wlen : int, optional Used for calculation of the peaks prominences, thus it is only used if one of the arguments `prominence` or `width` is given. See argument `wlen` in `peak_prominences` for a full description of its effects. rel_height : float, optional Used for calculation of the peaks width, thus it is only used if `width` is given. See argument `rel_height` in `peak_widths` for a full description of its effects. plateau_size : number or ndarray or sequence, optional Required size of the flat top of peaks in samples. Either a number, ``None``, an array matching `x` or a 2-element sequence of the former. The first element is always interpreted as the minimal and the second, if supplied as the maximal required plateau size. .. versionadded:: 1.2.0 Returns ------- peaks : ndarray Indices of peaks in `x` that satisfy all given conditions. properties : dict A dictionary containing properties of the returned peaks which were calculated as intermediate results during evaluation of the specified conditions: * 'peak_heights' If `height` is given, the height of each peak in `x`. 
* 'left_thresholds', 'right_thresholds' If `threshold` is given, these keys contain a peak's vertical distance to its neighbouring samples. * 'prominences', 'right_bases', 'left_bases' If `prominence` is given, these keys are accessible. See `peak_prominences` for a description of their content. * 'width_heights', 'left_ips', 'right_ips' If `width` is given, these keys are accessible. See `peak_widths` for a description of their content. * 'plateau_sizes', 'left_edges', 'right_edges' If `plateau_size` is given, these keys are accessible and contain the indices of a peak's edges (edges are still part of the plateau) and the calculated plateau sizes. .. versionadded:: 1.2.0 To calculate and return properties without excluding peaks, provide the open interval ``(None, None)`` as a value to the appropriate argument (excluding `distance`). Warns ----- PeakPropertyWarning Raised if a peak's properties have unexpected values (see `peak_prominences` and `peak_widths`). Warnings -------- This function may return unexpected results for data containing NaNs. To avoid this, NaNs should either be removed or replaced. See Also -------- find_peaks_cwt Find peaks using the wavelet transformation. peak_prominences Directly calculate the prominence of peaks. peak_widths Directly calculate the width of peaks. Notes ----- In the context of this function, a peak or local maximum is defined as any sample whose two direct neighbours have a smaller amplitude. For flat peaks (more than one sample of equal amplitude wide) the index of the middle sample is returned (rounded down in case the number of samples is even). For noisy signals the peak locations can be off because the noise might change the position of local maxima. In those cases consider smoothing the signal before searching for peaks or use other peak finding and fitting methods (like `find_peaks_cwt`). Some additional comments on specifying conditions: * Almost all conditions (excluding `distance`) can be given as half-open or closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval :math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified as well, which returns the matching properties without exclusion of peaks. * The border is always included in the interval used to select valid peaks. * For several conditions the interval borders can be specified with arrays matching `x` in shape which enables dynamic constraints based on the sample position. * The conditions are evaluated in the following order: `plateau_size`, `height`, `threshold`, `distance`, `prominence`, `width`. In most cases this order is the fastest one because faster operations are applied first to reduce the number of peaks that need to be evaluated later. * While indices in `peaks` are guaranteed to be at least `distance` samples apart, edges of flat peaks may be closer than the allowed `distance`. * Use `wlen` to reduce the time it takes to evaluate the conditions for `prominence` or `width` if `x` is large or has many local maxima (see `peak_prominences`). .. versionadded:: 1.1.0 Examples -------- To demonstrate this function's usage, we use a signal `x` supplied with SciPy (see `scipy.datasets.electrocardiogram`). Let's find all peaks (local maxima) in `x` whose amplitude lies above 0. 
>>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.datasets import electrocardiogram >>> from scipy.signal import find_peaks >>> x = electrocardiogram()[2000:4000] >>> peaks, _ = find_peaks(x, height=0) >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.plot(np.zeros_like(x), "--", color="gray") >>> plt.show() We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching `x` in size to reflect a changing condition for different parts of the signal. >>> border = np.sin(np.linspace(0, 3 * np.pi, x.size)) >>> peaks, _ = find_peaks(x, height=(-border, border)) >>> plt.plot(x) >>> plt.plot(-border, "--", color="gray") >>> plt.plot(border, ":", color="gray") >>> plt.plot(peaks, x[peaks], "x") >>> plt.show() Another useful condition for periodic signals can be given with the `distance` argument. In this case, we can easily select the positions of QRS complexes within the electrocardiogram (ECG) by demanding a distance of at least 150 samples. >>> peaks, _ = find_peaks(x, distance=150) >>> np.diff(peaks) array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172]) >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.show() Especially for noisy signals peaks can be easily grouped by their prominence (see `peak_prominences`). E.g., we can select all peaks except for the mentioned QRS complexes by limiting the allowed prominence to 0.6. >>> peaks, properties = find_peaks(x, prominence=(None, 0.6)) >>> properties["prominences"].max() 0.5049999999999999 >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.show() And, finally, let's examine a different section of the ECG which contains beat forms of different shape. To select only the atypical heart beats, we combine two conditions: a minimal prominence of 1 and width of at least 20 samples. >>> x = electrocardiogram()[17000:18000] >>> peaks, properties = find_peaks(x, prominence=1, width=20) >>> properties["prominences"], properties["widths"] (array([1.495, 2.3 ]), array([36.93773946, 39.32723577])) >>> plt.plot(x) >>> plt.plot(peaks, x[peaks], "x") >>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"], ... ymax = x[peaks], color = "C1") >>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"], ... 
xmax=properties["right_ips"], color = "C1") >>> plt.show() """ # _argmaxima1d expects array of dtype 'float64' x = _arg_x_as_expected(x) if distance is not None and distance < 1: raise ValueError('`distance` must be greater or equal to 1') peaks, left_edges, right_edges = _local_maxima_1d(x) properties = {} if plateau_size is not None: # Evaluate plateau size plateau_sizes = right_edges - left_edges + 1 pmin, pmax = _unpack_condition_args(plateau_size, x, peaks) keep = _select_by_property(plateau_sizes, pmin, pmax) peaks = peaks[keep] properties["plateau_sizes"] = plateau_sizes properties["left_edges"] = left_edges properties["right_edges"] = right_edges properties = {key: array[keep] for key, array in properties.items()} if height is not None: # Evaluate height condition peak_heights = x[peaks] hmin, hmax = _unpack_condition_args(height, x, peaks) keep = _select_by_property(peak_heights, hmin, hmax) peaks = peaks[keep] properties["peak_heights"] = peak_heights properties = {key: array[keep] for key, array in properties.items()} if threshold is not None: # Evaluate threshold condition tmin, tmax = _unpack_condition_args(threshold, x, peaks) keep, left_thresholds, right_thresholds = _select_by_peak_threshold( x, peaks, tmin, tmax) peaks = peaks[keep] properties["left_thresholds"] = left_thresholds properties["right_thresholds"] = right_thresholds properties = {key: array[keep] for key, array in properties.items()} if distance is not None: # Evaluate distance condition keep = _select_by_peak_distance(peaks, x[peaks], distance) peaks = peaks[keep] properties = {key: array[keep] for key, array in properties.items()} if prominence is not None or width is not None: # Calculate prominence (required for both conditions) wlen = _arg_wlen_as_expected(wlen) properties.update(zip( ['prominences', 'left_bases', 'right_bases'], _peak_prominences(x, peaks, wlen=wlen) )) if prominence is not None: # Evaluate prominence condition pmin, pmax = _unpack_condition_args(prominence, x, peaks) keep = _select_by_property(properties['prominences'], pmin, pmax) peaks = peaks[keep] properties = {key: array[keep] for key, array in properties.items()} if width is not None: # Calculate widths properties.update(zip( ['widths', 'width_heights', 'left_ips', 'right_ips'], _peak_widths(x, peaks, rel_height, properties['prominences'], properties['left_bases'], properties['right_bases']) )) # Evaluate width condition wmin, wmax = _unpack_condition_args(width, x, peaks) keep = _select_by_property(properties['widths'], wmin, wmax) peaks = peaks[keep] properties = {key: array[keep] for key, array in properties.items()} return peaks, properties def _identify_ridge_lines(matr, max_distances, gap_thresh): """ Identify ridges in the 2-D matrix. Expect that the width of the wavelet feature increases with increasing row number. Parameters ---------- matr : 2-D ndarray Matrix in which to identify ridge lines. max_distances : 1-D sequence At each row, a ridge line is only connected if the relative max at row[n] is within `max_distances`[n] from the relative max at row[n+1]. gap_thresh : int If a relative maximum is not found within `max_distances`, there will be a gap. A ridge line is discontinued if there are more than `gap_thresh` points without connecting a new relative maximum. Returns ------- ridge_lines : tuple Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none found. 
Each ridge-line will be sorted by row (increasing), but the order of the ridge lines is not specified. References ---------- .. [1] Bioinformatics (2006) 22 (17): 2059-2065. :doi:`10.1093/bioinformatics/btl355` Examples -------- >>> import numpy as np >>> rng = np.random.default_rng() >>> data = rng.random((5,5)) >>> max_dist = 3 >>> max_distances = np.full(20, max_dist) >>> ridge_lines = _identify_ridge_lines(data, max_distances, 1) Notes ----- This function is intended to be used in conjunction with `cwt` as part of `find_peaks_cwt`. """ if len(max_distances) < matr.shape[0]: raise ValueError('Max_distances must have at least as many rows ' 'as matr') all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1) # Highest row for which there are any relative maxima has_relmax = np.nonzero(all_max_cols.any(axis=1))[0] if len(has_relmax) == 0: return [] start_row = has_relmax[-1] # Each ridge line is a 3-tuple: # rows, cols,Gap number ridge_lines = [[[start_row], [col], 0] for col in np.nonzero(all_max_cols[start_row])[0]] final_lines = [] rows = np.arange(start_row - 1, -1, -1) cols = np.arange(0, matr.shape[1]) for row in rows: this_max_cols = cols[all_max_cols[row]] # Increment gap number of each line, # set it to zero later if appropriate for line in ridge_lines: line[2] += 1 # XXX These should always be all_max_cols[row] # But the order might be different. Might be an efficiency gain # to make sure the order is the same and avoid this iteration prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines]) # Look through every relative maximum found at current row # Attempt to connect them with existing ridge lines. for ind, col in enumerate(this_max_cols): # If there is a previous ridge line within # the max_distance to connect to, do so. # Otherwise start a new one. line = None if len(prev_ridge_cols) > 0: diffs = np.abs(col - prev_ridge_cols) closest = np.argmin(diffs) if diffs[closest] <= max_distances[row]: line = ridge_lines[closest] if line is not None: # Found a point close enough, extend current ridge line line[1].append(col) line[0].append(row) line[2] = 0 else: new_line = [[row], [col], 0] ridge_lines.append(new_line) # Remove the ridge lines with gap_number too high # XXX Modifying a list while iterating over it. # Should be safe, since we iterate backwards, but # still tacky. for ind in range(len(ridge_lines) - 1, -1, -1): line = ridge_lines[ind] if line[2] > gap_thresh: final_lines.append(line) del ridge_lines[ind] out_lines = [] for line in (final_lines + ridge_lines): sortargs = np.array(np.argsort(line[0])) rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs) rows[sortargs] = line[0] cols[sortargs] = line[1] out_lines.append([rows, cols]) return out_lines def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None, min_snr=1, noise_perc=10): """ Filter ridge lines according to prescribed criteria. Intended to be used for finding relative maxima. Parameters ---------- cwt : 2-D ndarray Continuous wavelet transform from which the `ridge_lines` were defined. ridge_lines : 1-D sequence Each element should contain 2 sequences, the rows and columns of the ridge line (respectively). window_size : int, optional Size of window to use to calculate noise floor. Default is ``cwt.shape[1] / 20``. min_length : int, optional Minimum length a ridge line needs to be acceptable. Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths. min_snr : float, optional Minimum SNR ratio. Default 1. 
The signal is the value of the cwt matrix at the shortest length scale (``cwt[0, loc]``), the noise is the `noise_perc`th percentile of datapoints contained within a window of `window_size` around ``cwt[0, loc]``. noise_perc : float, optional When calculating the noise floor, percentile of data points examined below which to consider noise. Calculated using scipy.stats.scoreatpercentile. References ---------- .. [1] Bioinformatics (2006) 22 (17): 2059-2065. :doi:`10.1093/bioinformatics/btl355` """ num_points = cwt.shape[1] if min_length is None: min_length = np.ceil(cwt.shape[0] / 4) if window_size is None: window_size = np.ceil(num_points / 20) window_size = int(window_size) hf_window, odd = divmod(window_size, 2) # Filter based on SNR row_one = cwt[0, :] noises = np.empty_like(row_one) for ind, val in enumerate(row_one): window_start = max(ind - hf_window, 0) window_end = min(ind + hf_window + odd, num_points) noises[ind] = scoreatpercentile(row_one[window_start:window_end], per=noise_perc) def filt_func(line): if len(line[0]) < min_length: return False snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]]) if snr < min_snr: return False return True return list(filter(filt_func, ridge_lines)) def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, gap_thresh=None, min_length=None, min_snr=1, noise_perc=10, window_size=None): """ Find peaks in a 1-D array with wavelet transformation. The general approach is to smooth `vector` by convolving it with `wavelet(width)` for each width in `widths`. Relative maxima which appear at enough length scales, and with sufficiently high SNR, are accepted. Parameters ---------- vector : ndarray 1-D array in which to find the peaks. widths : float or sequence Single width or 1-D array-like of widths to use for calculating the CWT matrix. In general, this range should cover the expected width of peaks of interest. wavelet : callable, optional Should take two parameters and return a 1-D array to convolve with `vector`. The first parameter determines the number of points of the returned wavelet array, the second parameter is the scale (`width`) of the wavelet. Should be normalized and symmetric. Default is the ricker wavelet. max_distances : ndarray, optional At each row, a ridge line is only connected if the relative max at row[n] is within ``max_distances[n]`` from the relative max at ``row[n+1]``. Default value is ``widths/4``. gap_thresh : float, optional If a relative maximum is not found within `max_distances`, there will be a gap. A ridge line is discontinued if there are more than `gap_thresh` points without connecting a new relative maximum. Default is the first value of the widths array i.e. widths[0]. min_length : int, optional Minimum length a ridge line needs to be acceptable. Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths. min_snr : float, optional Minimum SNR ratio. Default 1. The signal is the maximum CWT coefficient on the largest ridge line. The noise is `noise_perc` th percentile of datapoints contained within the same ridge line. noise_perc : float, optional When calculating the noise floor, percentile of data points examined below which to consider noise. Calculated using `stats.scoreatpercentile`. Default is 10. window_size : int, optional Size of window to use to calculate noise floor. Default is ``cwt.shape[1] / 20``. Returns ------- peaks_indices : ndarray Indices of the locations in the `vector` where peaks were found. The list is sorted. See Also -------- cwt Continuous wavelet transform. 
find_peaks Find peaks inside a signal based on peak properties. Notes ----- This approach was designed for finding sharp peaks among noisy data, however with proper parameter selection it should function well for different peak shapes. The algorithm is as follows: 1. Perform a continuous wavelet transform on `vector`, for the supplied `widths`. This is a convolution of `vector` with `wavelet(width)` for each width in `widths`. See `cwt`. 2. Identify "ridge lines" in the cwt matrix. These are relative maxima at each row, connected across adjacent rows. See identify_ridge_lines 3. Filter the ridge_lines using filter_ridge_lines. .. versionadded:: 0.11.0 References ---------- .. [1] Bioinformatics (2006) 22 (17): 2059-2065. :doi:`10.1093/bioinformatics/btl355` Examples -------- >>> import numpy as np >>> from scipy import signal >>> xs = np.arange(0, np.pi, 0.05) >>> data = np.sin(xs) >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10)) >>> peakind, xs[peakind], data[peakind] ([32], array([ 1.6]), array([ 0.9995736])) """ widths = np.array(widths, copy=False, ndmin=1) if gap_thresh is None: gap_thresh = np.ceil(widths[0]) if max_distances is None: max_distances = widths / 4.0 if wavelet is None: wavelet = ricker cwt_dat = cwt(vector, wavelet, widths) ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh) filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length, window_size=window_size, min_snr=min_snr, noise_perc=noise_perc) max_locs = np.asarray([x[1][0] for x in filtered]) max_locs.sort() return max_locs
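# Illustrative sketch (not part of the original module): a minimal run of the
# public helpers documented above, assuming a standard SciPy installation.
# As noted in the ``find_peaks`` docstring, the open interval ``(None, None)``
# requests a property without excluding any peaks.
if __name__ == "__main__":
    import numpy as np
    from scipy.signal import find_peaks, peak_prominences, peak_widths

    x = np.array([0., 1., 0., 3., 1., 3., 0., 4., 0.])  # tiny test signal
    peaks, props = find_peaks(x, prominence=(None, None), width=(None, None))
    print(peaks)                     # indices of the local maxima in x
    print(props["prominences"])      # same values peak_prominences would return
    print(props["widths"])           # widths at half the prominence height

    # The standalone helpers accept the peak indices directly:
    print(peak_prominences(x, peaks)[0])
    print(peak_widths(x, peaks, rel_height=1.0)[0])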
48,807
36.20122
81
py
scipy
scipy-main/scipy/signal/bsplines.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _bsplines __all__ = [ # noqa: F822 'spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic', 'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval', 'logical_and', 'zeros_like', 'piecewise', 'array', 'arctan2', 'tan', 'arange', 'floor', 'exp', 'greater', 'less', 'add', 'less_equal', 'greater_equal', 'cspline2d', 'sepfir2d', 'comb', 'float_factorial' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.bsplines is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.bsplines` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_bsplines, name)
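# Illustrative sketch (not part of the original shim): how the module-level
# ``__getattr__`` above behaves.  Names listed in ``__all__`` are forwarded to
# the private ``scipy.signal._bsplines`` module while a DeprecationWarning is
# emitted; unknown names raise AttributeError.  Assumes a SciPy build in which
# this deprecated namespace still exists.
if __name__ == "__main__":
    import warnings
    from scipy.signal import bsplines

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        func = bsplines.spline_filter               # triggers __getattr__
    print(func.__module__)                          # the private implementation module
    print([w.category.__name__ for w in caught])    # ['DeprecationWarning']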
1,085
31.909091
76
py
scipy
scipy-main/scipy/signal/spectral.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _spectral_py __all__ = [ # noqa: F822 'periodogram', 'welch', 'lombscargle', 'csd', 'coherence', 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA', 'sp_fft', 'get_window', 'const_ext', 'even_ext', 'odd_ext', 'zero_ext' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.spectral is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.spectral` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_spectral_py, name)
944
27.636364
76
py
scipy
scipy-main/scipy/signal/_spectral.py
# Author: Pim Schellart # 2010 - 2011 """Tools for spectral analysis of unequally sampled signals.""" import numpy as np #pythran export _lombscargle(float64[], float64[], float64[]) def _lombscargle(x, y, freqs): """ _lombscargle(x, y, freqs) Computes the Lomb-Scargle periodogram. Parameters ---------- x : array_like Sample times. y : array_like Measurement values (must be registered so the mean is zero). freqs : array_like Angular frequencies for output periodogram. Returns ------- pgram : array_like Lomb-Scargle periodogram. Raises ------ ValueError If the input arrays `x` and `y` do not have the same shape. See also -------- lombscargle """ # Check input sizes if x.shape != y.shape: raise ValueError("Input arrays do not have the same size.") # Create empty array for output periodogram pgram = np.empty_like(freqs) c = np.empty_like(x) s = np.empty_like(x) for i in range(freqs.shape[0]): xc = 0. xs = 0. cc = 0. ss = 0. cs = 0. c[:] = np.cos(freqs[i] * x) s[:] = np.sin(freqs[i] * x) for j in range(x.shape[0]): xc += y[j] * c[j] xs += y[j] * s[j] cc += c[j] * c[j] ss += s[j] * s[j] cs += c[j] * s[j] if freqs[i] == 0: raise ZeroDivisionError() tau = np.arctan2(2 * cs, cc - ss) / (2 * freqs[i]) c_tau = np.cos(freqs[i] * tau) s_tau = np.sin(freqs[i] * tau) c_tau2 = c_tau * c_tau s_tau2 = s_tau * s_tau cs_tau = 2 * c_tau * s_tau pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + ((c_tau * xs - s_tau * xc)**2 / (c_tau2 * ss - cs_tau * cs + s_tau2 * cc))) return pgram
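# Illustrative sketch (not part of the original module): a quick sanity check
# of the periodogram computed above.  For a zero-mean sinusoid sampled at
# unequally spaced times, the Lomb-Scargle power should peak near the true
# angular frequency.  Only the pure-NumPy routine defined in this file is used.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w_true = 2.5                                  # true angular frequency
    t = np.sort(rng.uniform(0.0, 20.0, 200))      # irregular sample times
    y = np.sin(w_true * t)
    y -= y.mean()                                 # the docstring asks for zero mean
    freqs = np.linspace(0.5, 5.0, 400)            # strictly positive, 0 is rejected
    pgram = _lombscargle(t, y, freqs)
    print(freqs[np.argmax(pgram)])                # expected to lie close to 2.5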
1,940
22.107143
68
py
scipy
scipy-main/scipy/signal/waveforms.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.signal` namespace for importing the functions # included below. import warnings from . import _waveforms __all__ = [ # noqa: F822 'sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly', 'unit_impulse', 'place', 'nan', 'mod', 'extract', 'log', 'exp', 'polyval', 'polyint' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.signal.waveforms is deprecated and has no attribute " f"{name}. Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.waveforms` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_waveforms, name)
890
28.7
76
py
scipy
scipy-main/scipy/signal/__init__.py
""" ======================================= Signal processing (:mod:`scipy.signal`) ======================================= Convolution =========== .. autosummary:: :toctree: generated/ convolve -- N-D convolution. correlate -- N-D correlation. fftconvolve -- N-D convolution using the FFT. oaconvolve -- N-D convolution using the overlap-add method. convolve2d -- 2-D convolution (more options). correlate2d -- 2-D correlation (more options). sepfir2d -- Convolve with a 2-D separable FIR filter. choose_conv_method -- Chooses faster of FFT and direct convolution methods. correlation_lags -- Determines lag indices for 1D cross-correlation. B-splines ========= .. autosummary:: :toctree: generated/ bspline -- B-spline basis function of order n. cubic -- B-spline basis function of order 3. quadratic -- B-spline basis function of order 2. gauss_spline -- Gaussian approximation to the B-spline basis function. cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline. qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline. cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline. qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline. cspline1d_eval -- Evaluate a cubic spline at the given points. qspline1d_eval -- Evaluate a quadratic spline at the given points. spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array. Filtering ========= .. autosummary:: :toctree: generated/ order_filter -- N-D order filter. medfilt -- N-D median filter. medfilt2d -- 2-D median filter (faster). wiener -- N-D Wiener filter. symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems). symiirorder2 -- 4th-order IIR filter (cascade of second-order systems). lfilter -- 1-D FIR and IIR digital linear filtering. lfiltic -- Construct initial conditions for `lfilter`. lfilter_zi -- Compute an initial state zi for the lfilter function that -- corresponds to the steady state of the step response. filtfilt -- A forward-backward filter. savgol_filter -- Filter a signal using the Savitzky-Golay filter. deconvolve -- 1-D deconvolution using lfilter. sosfilt -- 1-D IIR digital linear filtering using -- a second-order sections filter representation. sosfilt_zi -- Compute an initial state zi for the sosfilt function that -- corresponds to the steady state of the step response. sosfiltfilt -- A forward-backward filter for second-order sections. hilbert -- Compute 1-D analytic signal, using the Hilbert transform. hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform. decimate -- Downsample a signal. detrend -- Remove linear and/or constant trends from data. resample -- Resample using Fourier method. resample_poly -- Resample using polyphase filtering method. upfirdn -- Upsample, apply FIR filter, downsample. Filter design ============= .. autosummary:: :toctree: generated/ bilinear -- Digital filter from an analog filter using -- the bilinear transform. bilinear_zpk -- Digital filter from an analog filter using -- the bilinear transform. findfreqs -- Find array of frequencies for computing filter response. firls -- FIR filter design using least-squares error minimization. firwin -- Windowed FIR filter design, with frequency response -- defined as pass and stop bands. firwin2 -- Windowed FIR filter design, with arbitrary frequency -- response. freqs -- Analog filter frequency response from TF coefficients. freqs_zpk -- Analog filter frequency response from ZPK coefficients. freqz -- Digital filter frequency response from TF coefficients. 
freqz_zpk -- Digital filter frequency response from ZPK coefficients. sosfreqz -- Digital filter frequency response for SOS format filter. gammatone -- FIR and IIR gammatone filter design. group_delay -- Digital filter group delay. iirdesign -- IIR filter design given bands and gains. iirfilter -- IIR filter design given order and critical frequencies. kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given -- the number of taps and the transition width at -- discontinuities in the frequency response. kaiser_beta -- Compute the Kaiser parameter beta, given the desired -- FIR filter attenuation. kaiserord -- Design a Kaiser window to limit ripple and width of -- transition region. minimum_phase -- Convert a linear phase FIR filter to minimum phase. savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay -- filter. remez -- Optimal FIR filter design. unique_roots -- Unique roots and their multiplicities. residue -- Partial fraction expansion of b(s) / a(s). residuez -- Partial fraction expansion of b(z) / a(z). invres -- Inverse partial fraction expansion for analog filter. invresz -- Inverse partial fraction expansion for digital filter. BadCoefficients -- Warning on badly conditioned filter coefficients. Lower-level filter design functions: .. autosummary:: :toctree: generated/ abcd_normalize -- Check state-space matrices and ensure they are rank-2. band_stop_obj -- Band Stop Objective Function for order minimization. besselap -- Return (z,p,k) for analog prototype of Bessel filter. buttap -- Return (z,p,k) for analog prototype of Butterworth filter. cheb1ap -- Return (z,p,k) for type I Chebyshev filter. cheb2ap -- Return (z,p,k) for type II Chebyshev filter. cmplx_sort -- Sort roots based on magnitude. ellipap -- Return (z,p,k) for analog prototype of elliptic filter. lp2bp -- Transform a lowpass filter prototype to a bandpass filter. lp2bp_zpk -- Transform a lowpass filter prototype to a bandpass filter. lp2bs -- Transform a lowpass filter prototype to a bandstop filter. lp2bs_zpk -- Transform a lowpass filter prototype to a bandstop filter. lp2hp -- Transform a lowpass filter prototype to a highpass filter. lp2hp_zpk -- Transform a lowpass filter prototype to a highpass filter. lp2lp -- Transform a lowpass filter prototype to a lowpass filter. lp2lp_zpk -- Transform a lowpass filter prototype to a lowpass filter. normalize -- Normalize polynomial representation of a transfer function. Matlab-style IIR filter design ============================== .. autosummary:: :toctree: generated/ butter -- Butterworth buttord cheby1 -- Chebyshev Type I cheb1ord cheby2 -- Chebyshev Type II cheb2ord ellip -- Elliptic (Cauer) ellipord bessel -- Bessel (no order selection available -- try butterod) iirnotch -- Design second-order IIR notch digital filter. iirpeak -- Design second-order IIR peak (resonant) digital filter. iircomb -- Design IIR comb filter. Continuous-time linear systems ============================== .. autosummary:: :toctree: generated/ lti -- Continuous-time linear time invariant system base class. StateSpace -- Linear time invariant system in state space form. TransferFunction -- Linear time invariant system in transfer function form. ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form. lsim -- Continuous-time simulation of output to linear system. lsim2 -- Like lsim, but `scipy.integrate.odeint` is used. impulse -- Impulse response of linear, time-invariant (LTI) system. 
impulse2 -- Like impulse, but `scipy.integrate.odeint` is used. step -- Step response of continuous-time LTI system. step2 -- Like step, but `scipy.integrate.odeint` is used. freqresp -- Frequency response of a continuous-time LTI system. bode -- Bode magnitude and phase data (continuous-time LTI). Discrete-time linear systems ============================ .. autosummary:: :toctree: generated/ dlti -- Discrete-time linear time invariant system base class. StateSpace -- Linear time invariant system in state space form. TransferFunction -- Linear time invariant system in transfer function form. ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form. dlsim -- Simulation of output to a discrete-time linear system. dimpulse -- Impulse response of a discrete-time LTI system. dstep -- Step response of a discrete-time LTI system. dfreqresp -- Frequency response of a discrete-time LTI system. dbode -- Bode magnitude and phase data (discrete-time LTI). LTI representations =================== .. autosummary:: :toctree: generated/ tf2zpk -- Transfer function to zero-pole-gain. tf2sos -- Transfer function to second-order sections. tf2ss -- Transfer function to state-space. zpk2tf -- Zero-pole-gain to transfer function. zpk2sos -- Zero-pole-gain to second-order sections. zpk2ss -- Zero-pole-gain to state-space. ss2tf -- State-pace to transfer function. ss2zpk -- State-space to pole-zero-gain. sos2zpk -- Second-order sections to zero-pole-gain. sos2tf -- Second-order sections to transfer function. cont2discrete -- Continuous-time to discrete-time LTI conversion. place_poles -- Pole placement. Waveforms ========= .. autosummary:: :toctree: generated/ chirp -- Frequency swept cosine signal, with several freq functions. gausspulse -- Gaussian modulated sinusoid. max_len_seq -- Maximum length sequence. sawtooth -- Periodic sawtooth. square -- Square wave. sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial. unit_impulse -- Discrete unit impulse. Window functions ================ For window functions, see the `scipy.signal.windows` namespace. In the `scipy.signal` namespace, there is a convenience function to obtain these windows by name: .. autosummary:: :toctree: generated/ get_window -- Return a window of a given length and type. Wavelets ======== .. autosummary:: :toctree: generated/ cascade -- Compute scaling function and wavelet from coefficients. daub -- Return low-pass. morlet -- Complex Morlet wavelet. qmf -- Return quadrature mirror filter from low-pass. ricker -- Return ricker wavelet. morlet2 -- Return Morlet wavelet, compatible with cwt. cwt -- Perform continuous wavelet transform. Peak finding ============ .. autosummary:: :toctree: generated/ argrelmin -- Calculate the relative minima of data. argrelmax -- Calculate the relative maxima of data. argrelextrema -- Calculate the relative extrema of data. find_peaks -- Find a subset of peaks inside a signal. find_peaks_cwt -- Find peaks in a 1-D array with wavelet transformation. peak_prominences -- Calculate the prominence of each peak in a signal. peak_widths -- Calculate the width of each peak in a signal. Spectral analysis ================= .. autosummary:: :toctree: generated/ periodogram -- Compute a (modified) periodogram. welch -- Compute a periodogram using Welch's method. csd -- Compute the cross spectral density, using Welch's method. coherence -- Compute the magnitude squared coherence, using Welch's method. spectrogram -- Compute the spectrogram. 
lombscargle -- Computes the Lomb-Scargle periodogram. vectorstrength -- Computes the vector strength. ShortTimeFFT -- Interface for calculating the \ :ref:`Short Time Fourier Transform <tutorial_stft>` and \ its inverse. stft -- Compute the Short Time Fourier Transform (legacy). istft -- Compute the Inverse Short Time Fourier Transform (legacy). check_COLA -- Check the COLA constraint for iSTFT reconstruction. check_NOLA -- Check the NOLA constraint for iSTFT reconstruction. Chirp Z-transform and Zoom FFT ============================================ .. autosummary:: :toctree: generated/ czt - Chirp z-transform convenience function zoom_fft - Zoom FFT convenience function CZT - Chirp z-transform function generator ZoomFFT - Zoom FFT function generator czt_points - Output the z-plane points sampled by a chirp z-transform The functions are simpler to use than the classes, but are less efficient when using the same transform on many arrays of the same length, since they repeatedly generate the same chirp signal with every call. In these cases, use the classes to create a reusable function instead. """ import warnings from . import _sigtools, windows from ._waveforms import * from ._max_len_seq import max_len_seq from ._upfirdn import upfirdn from ._spline import ( # noqa: F401 cspline2d, qspline2d, sepfir2d, symiirorder1, symiirorder2, ) from ._bsplines import * from ._filter_design import * from ._fir_filter_design import * from ._ltisys import * from ._lti_conversion import * from ._signaltools import * from ._savitzky_golay import savgol_coeffs, savgol_filter from ._spectral_py import * from ._short_time_fft import * from ._wavelets import * from ._peak_finding import * from ._czt import * from .windows import get_window # keep this one in signal namespace # Deprecated namespaces, to be removed in v2.0.0 from . import ( bsplines, filter_design, fir_filter_design, lti_conversion, ltisys, spectral, signaltools, waveforms, wavelets, spline ) # deal with * -> windows.* doc-only soft-deprecation deprecated_windows = ('boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', 'blackmanharris', 'flattop', 'bartlett', 'barthann', 'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin', 'cosine', 'hann', 'exponential', 'tukey') def deco(name): f = getattr(windows, name) # Add deprecation to docstring def wrapped(*args, **kwargs): warnings.warn(f"Importing {name} from 'scipy.signal' is deprecated " "and will raise an error in SciPy 1.13.0. Please use " f"'scipy.signal.windows.{name}' or the convenience " "function 'scipy.signal.get_window' instead.", DeprecationWarning, stacklevel=2) return f(*args, **kwargs) wrapped.__name__ = name wrapped.__module__ = 'scipy.signal' if hasattr(f, '__qualname__'): wrapped.__qualname__ = f.__qualname__ if f.__doc__: lines = f.__doc__.splitlines() for li, line in enumerate(lines): if line.strip() == 'Parameters': break else: raise RuntimeError('dev error: badly formatted doc') spacing = ' ' * line.find('P') lines.insert(li, ('{0}.. warning:: scipy.signal.{1} is deprecated,\n' '{0} use scipy.signal.windows.{1} ' 'instead.\n'.format(spacing, name))) wrapped.__doc__ = '\n'.join(lines) return wrapped for name in deprecated_windows: locals()[name] = deco(name) del deprecated_windows, name, deco __all__ = [s for s in dir() if not s.startswith('_') and s != "warnings"] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
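# Illustrative sketch (not part of the original module): what the ``deco``
# wrapper above means in practice.  Window functions stay reachable under
# their old top-level names but warn; the supported spellings are the
# ``windows`` namespace and ``get_window``.  Assumes a SciPy version in which
# this soft deprecation is still active.
if __name__ == "__main__":
    import warnings
    import numpy as np
    from scipy import signal

    w_ns = signal.windows.hann(8, sym=False)    # recommended namespace
    w_gw = signal.get_window("hann", 8)         # convenience lookup (periodic window)
    print(np.allclose(w_ns, w_gw))              # True: the same window either way

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        w_old = signal.hann(8)                  # old top-level alias still works
    print(any(issubclass(w.category, DeprecationWarning) for w in caught))  # True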
16,116
39.494975
83
py
scipy
scipy-main/scipy/signal/_upfirdn.py
# Code adapted from "upfirdn" python library with permission: # # Copyright (c) 2009, Motorola, Inc # # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Motorola nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np from ._upfirdn_apply import _output_len, _apply, mode_enum __all__ = ['upfirdn', '_output_len'] _upfirdn_modes = [ 'constant', 'wrap', 'edge', 'smooth', 'symmetric', 'reflect', 'antisymmetric', 'antireflect', 'line', ] def _pad_h(h, up): """Store coefficients in a transposed, flipped arrangement. For example, suppose upRate is 3, and the input number of coefficients is 10, represented as h[0], ..., h[9]. 
Then the internal buffer will look like this:: h[9], h[6], h[3], h[0], // flipped phase 0 coefs 0, h[7], h[4], h[1], // flipped phase 1 coefs (zero-padded) 0, h[8], h[5], h[2], // flipped phase 2 coefs (zero-padded) """ h_padlen = len(h) + (-len(h) % up) h_full = np.zeros(h_padlen, h.dtype) h_full[:len(h)] = h h_full = h_full.reshape(-1, up).T[:, ::-1].ravel() return h_full def _check_mode(mode): mode = mode.lower() enum = mode_enum(mode) return enum class _UpFIRDn: """Helper for resampling.""" def __init__(self, h, x_dtype, up, down): h = np.asarray(h) if h.ndim != 1 or h.size == 0: raise ValueError('h must be 1-D with non-zero length') self._output_type = np.result_type(h.dtype, x_dtype, np.float32) h = np.asarray(h, self._output_type) self._up = int(up) self._down = int(down) if self._up < 1 or self._down < 1: raise ValueError('Both up and down must be >= 1') # This both transposes, and "flips" each phase for filtering self._h_trans_flip = _pad_h(h, self._up) self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) self._h_len_orig = len(h) def apply_filter(self, x, axis=-1, mode='constant', cval=0): """Apply the prepared filter to the specified axis of N-D signal x.""" output_len = _output_len(self._h_len_orig, x.shape[axis], self._up, self._down) # Explicit use of np.int64 for output_shape dtype avoids OverflowError # when allocating large array on platforms where np.int_ is 32 bits output_shape = np.asarray(x.shape, dtype=np.int64) output_shape[axis] = output_len out = np.zeros(output_shape, dtype=self._output_type, order='C') axis = axis % x.ndim mode = _check_mode(mode) _apply(np.asarray(x, self._output_type), self._h_trans_flip, out, self._up, self._down, axis, mode, cval) return out def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0): """Upsample, FIR filter, and downsample. Parameters ---------- h : array_like 1-D FIR (finite-impulse response) filter coefficients. x : array_like Input signal array. up : int, optional Upsampling rate. Default is 1. down : int, optional Downsampling rate. Default is 1. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. mode : str, optional The signal extension mode to use. The set ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to modes provided by `numpy.pad`. ``"smooth"`` implements a smooth extension by extending based on the slope of the last 2 points at each end of the array. ``"antireflect"`` and ``"antisymmetric"`` are anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode `"line"` extends the signal based on a linear trend defined by the first and last points along the ``axis``. .. versionadded:: 1.4.0 cval : float, optional The constant value to use when ``mode == "constant"``. .. versionadded:: 1.4.0 Returns ------- y : ndarray The output signal array. Dimensions will be the same as `x` except for along `axis`, which will change size according to the `h`, `up`, and `down` parameters. Notes ----- The algorithm is an implementation of the block diagram shown on page 129 of the Vaidyanathan text [1]_ (Figure 4.3-8d). The direct approach of upsampling by factor of P with zero insertion, FIR filtering of length ``N``, and downsampling by factor of Q is O(N*Q) per output sample. The polyphase implementation used here is O(N/P). .. versionadded:: 0.18 References ---------- .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, Prentice Hall, 1993. 
Examples -------- Simple operations: >>> import numpy as np >>> from scipy.signal import upfirdn >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter array([ 1., 2., 3., 2., 1.]) >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion array([ 1., 0., 0., 2., 0., 0., 3.]) >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.]) >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5]) >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3 array([ 0., 3., 6., 9.]) >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3 array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5]) Apply a single filter to multiple signals: >>> x = np.reshape(np.arange(8), (4, 2)) >>> x array([[0, 1], [2, 3], [4, 5], [6, 7]]) Apply along the last dimension of ``x``: >>> h = [1, 1] >>> upfirdn(h, x, 2) array([[ 0., 0., 1., 1.], [ 2., 2., 3., 3.], [ 4., 4., 5., 5.], [ 6., 6., 7., 7.]]) Apply along the 0th dimension of ``x``: >>> upfirdn(h, x, 2, axis=0) array([[ 0., 1.], [ 0., 1.], [ 2., 3.], [ 2., 3.], [ 4., 5.], [ 4., 5.], [ 6., 7.], [ 6., 7.]]) """ x = np.asarray(x) ufd = _UpFIRDn(h, x.dtype, up, down) # This is equivalent to (but faster than) using np.apply_along_axis return ufd.apply_filter(x, axis, mode, cval)
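# Illustrative sketch (not part of the original module): a cross-check of the
# polyphase implementation above against the direct "zero insertion, FIR
# filter, downsample" description in the Notes.  The direct result can carry a
# few extra trailing samples produced by the upsampler's padding zeros, so
# only the common prefix is compared.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.standard_normal(50)
    h = rng.standard_normal(7)
    up, down = 3, 2

    fast = upfirdn(h, x, up=up, down=down)        # polyphase path defined above

    x_up = np.zeros(x.size * up)                  # upsample by zero insertion
    x_up[::up] = x
    direct = np.convolve(x_up, h)[::down]         # full FIR filter, then decimate

    print(np.allclose(fast, direct[:fast.size]))  # expected: True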
7,884
35.336406
78
py
scipy
scipy-main/scipy/signal/spline.py
# This file is not meant for public use and will be removed in the future # versions of SciPy. Use the `scipy.signal` namespace for importing the # functions included below. import warnings from . import _spline __all__ = [ # noqa: F822 'cspline2d', 'qspline2d', 'sepfir2d', 'symiirorder1', 'symiirorder2'] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( f"scipy.signal.spline is deprecated and has no attribute {name}. " "Try looking in scipy.signal instead.") warnings.warn(f"Please use `{name}` from the `scipy.signal` namespace, " "the `scipy.signal.spline` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_spline, name)
810
29.037037
78
py
scipy
scipy-main/scipy/signal/_signaltools.py
# Author: Travis Oliphant # 1999 -- 2002 import operator import math from math import prod as _prod import timeit import warnings from scipy.spatial import cKDTree from . import _sigtools from ._ltisys import dlti from ._upfirdn import upfirdn, _output_len, _upfirdn_modes from scipy import linalg, fft as sp_fft from scipy import ndimage from scipy.fft._helper import _init_nd_shape_and_axes import numpy as np from scipy.special import lambertw from .windows import get_window from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext from ._filter_design import cheby1, _validate_sos, zpk2sos from ._fir_filter_design import firwin from ._sosfilt import _sosfilt __all__ = ['correlate', 'correlation_lags', 'correlate2d', 'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve', 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', 'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue', 'residuez', 'resample', 'resample_poly', 'detrend', 'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method', 'filtfilt', 'decimate', 'vectorstrength'] _modedict = {'valid': 0, 'same': 1, 'full': 2} _boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1, 'symmetric': 1, 'reflect': 4} def _valfrommode(mode): try: return _modedict[mode] except KeyError as e: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") from e def _bvalfromboundary(boundary): try: return _boundarydict[boundary] << 2 except KeyError as e: raise ValueError("Acceptable boundary flags are 'fill', 'circular' " "(or 'wrap'), and 'symmetric' (or 'symm').") from e def _inputs_swap_needed(mode, shape1, shape2, axes=None): """Determine if inputs arrays need to be swapped in `"valid"` mode. If in `"valid"` mode, returns whether or not the input arrays need to be swapped depending on whether `shape1` is at least as large as `shape2` in every calculated dimension. This is important for some of the correlation and convolution implementations in this module, where the larger array input needs to come before the smaller array input when operating in this mode. Note that if the mode provided is not 'valid', False is immediately returned. """ if mode != 'valid': return False if not shape1: return False if axes is None: axes = range(len(shape1)) ok1 = all(shape1[i] >= shape2[i] for i in axes) ok2 = all(shape2[i] >= shape1[i] for i in axes) if not (ok1 or ok2): raise ValueError("For 'valid' mode, one must be at least " "as large as the other in every dimension") return not ok1 def correlate(in1, in2, mode='full', method='auto'): r""" Cross-correlate two N-dimensional arrays. Cross-correlate `in1` and `in2`, with the output size determined by the `mode` argument. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear cross-correlation of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. method : str {'auto', 'direct', 'fft'}, optional A string indicating which method to use to calculate the correlation. 
``direct`` The correlation is determined directly from sums, the definition of correlation. ``fft`` The Fast Fourier Transform is used to perform the correlation more quickly (only available for numerical arrays.) ``auto`` Automatically chooses direct or Fourier method based on an estimate of which is faster (default). See `convolve` Notes for more detail. .. versionadded:: 0.19.0 Returns ------- correlate : array An N-dimensional array containing a subset of the discrete linear cross-correlation of `in1` with `in2`. See Also -------- choose_conv_method : contains more documentation on `method`. correlation_lags : calculates the lag / displacement indices array for 1D cross-correlation. Notes ----- The correlation z of two d-dimensional arrays x and y is defined as:: z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...]) This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` then .. math:: z[k] = (x * y)(k - N + 1) = \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*} for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2` where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`, and :math:`y_m` is 0 when m is outside the range of y. ``method='fft'`` only works for numerical arrays as it relies on `fftconvolve`. In certain cases (i.e., arrays of objects or when rounding integers can lose precision), ``method='direct'`` is always used. When using "same" mode with even-length inputs, the outputs of `correlate` and `correlate2d` differ: There is a 1-index offset between them. Examples -------- Implement a matched filter using cross-correlation, to recover a signal that has passed through a noisy channel. >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() >>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128) >>> sig_noise = sig + rng.standard_normal(len(sig)) >>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128 >>> clock = np.arange(64, len(sig), 128) >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True) >>> ax_orig.plot(sig) >>> ax_orig.plot(clock, sig[clock], 'ro') >>> ax_orig.set_title('Original signal') >>> ax_noise.plot(sig_noise) >>> ax_noise.set_title('Signal with noise') >>> ax_corr.plot(corr) >>> ax_corr.plot(clock, corr[clock], 'ro') >>> ax_corr.axhline(0.5, ls=':') >>> ax_corr.set_title('Cross-correlated with rectangular pulse') >>> ax_orig.margins(0, 0.1) >>> fig.tight_layout() >>> plt.show() Compute the cross-correlation of a noisy signal with the original signal. 
>>> x = np.arange(128) / 128 >>> sig = np.sin(2 * np.pi * x) >>> sig_noise = sig + rng.standard_normal(len(sig)) >>> corr = signal.correlate(sig_noise, sig) >>> lags = signal.correlation_lags(len(sig), len(sig_noise)) >>> corr /= np.max(corr) >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, figsize=(4.8, 4.8)) >>> ax_orig.plot(sig) >>> ax_orig.set_title('Original signal') >>> ax_orig.set_xlabel('Sample Number') >>> ax_noise.plot(sig_noise) >>> ax_noise.set_title('Signal with noise') >>> ax_noise.set_xlabel('Sample Number') >>> ax_corr.plot(lags, corr) >>> ax_corr.set_title('Cross-correlated signal') >>> ax_corr.set_xlabel('Lag') >>> ax_orig.margins(0, 0.1) >>> ax_noise.margins(0, 0.1) >>> ax_corr.margins(0, 0.1) >>> fig.tight_layout() >>> plt.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if in1.ndim == in2.ndim == 0: return in1 * in2.conj() elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") # Don't use _valfrommode, since correlate should not accept numeric modes try: val = _modedict[mode] except KeyError as e: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full'.") from e # this either calls fftconvolve or this function with method=='direct' if method in ('fft', 'auto'): return convolve(in1, _reverse_and_conj(in2), mode, method) elif method == 'direct': # fastpath to faster numpy.correlate for 1d inputs when possible if _np_conv_ok(in1, in2, mode): return np.correlate(in1, in2, mode) # _correlateND is far slower when in2.size > in1.size, so swap them # and then undo the effect afterward if mode == 'full'. Also, it fails # with 'valid' mode if in2 is larger than in1, so swap those, too. # Don't swap inputs for 'same' mode, since shape of in1 matters. swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or _inputs_swap_needed(mode, in1.shape, in2.shape)) if swapped_inputs: in1, in2 = in2, in1 if mode == 'valid': ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)] out = np.empty(ps, in1.dtype) z = _sigtools._correlateND(in1, in2, out, val) else: ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)] # zero pad input in1zpadded = np.zeros(ps, in1.dtype) sc = tuple(slice(0, i) for i in in1.shape) in1zpadded[sc] = in1.copy() if mode == 'full': out = np.empty(ps, in1.dtype) elif mode == 'same': out = np.empty(in1.shape, in1.dtype) z = _sigtools._correlateND(in1zpadded, in2, out, val) if swapped_inputs: # Reverse and conjugate to undo the effect of swapping inputs z = _reverse_and_conj(z) return z else: raise ValueError("Acceptable method flags are 'auto'," " 'direct', or 'fft'.") def correlation_lags(in1_len, in2_len, mode='full'): r""" Calculates the lag / displacement indices array for 1D cross-correlation. Parameters ---------- in1_len : int First input size. in2_len : int Second input size. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output. See the documentation `correlate` for more information. Returns ------- lags : array Returns an array containing cross-correlation lag/displacement indices. Indices can be indexed with the np.argmax of the correlation to return the lag/displacement. See Also -------- correlate : Compute the N-dimensional cross-correlation. Notes ----- Cross-correlation for continuous functions :math:`f` and :math:`g` is defined as: .. 
math:: \left ( f\star g \right )\left ( \tau \right ) \triangleq \int_{t_0}^{t_0 +T} \overline{f\left ( t \right )}g\left ( t+\tau \right )dt Where :math:`\tau` is defined as the displacement, also known as the lag. Cross correlation for discrete functions :math:`f` and :math:`g` is defined as: .. math:: \left ( f\star g \right )\left [ n \right ] \triangleq \sum_{-\infty}^{\infty} \overline{f\left [ m \right ]}g\left [ m+n \right ] Where :math:`n` is the lag. Examples -------- Cross-correlation of a signal with its time-delayed self. >>> import numpy as np >>> from scipy import signal >>> rng = np.random.default_rng() >>> x = rng.standard_normal(1000) >>> y = np.concatenate([rng.standard_normal(100), x]) >>> correlation = signal.correlate(x, y, mode="full") >>> lags = signal.correlation_lags(x.size, y.size, mode="full") >>> lag = lags[np.argmax(correlation)] """ # calculate lag ranges in different modes of operation if mode == "full": # the output is the full discrete linear convolution # of the inputs. (Default) lags = np.arange(-in2_len + 1, in1_len) elif mode == "same": # the output is the same size as `in1`, centered # with respect to the 'full' output. # calculate the full output lags = np.arange(-in2_len + 1, in1_len) # determine the midpoint in the full output mid = lags.size // 2 # determine lag_bound to be used with respect # to the midpoint lag_bound = in1_len // 2 # calculate lag ranges for even and odd scenarios if in1_len % 2 == 0: lags = lags[(mid-lag_bound):(mid+lag_bound)] else: lags = lags[(mid-lag_bound):(mid+lag_bound)+1] elif mode == "valid": # the output consists only of those elements that do not # rely on the zero-padding. In 'valid' mode, either `in1` or `in2` # must be at least as large as the other in every dimension. # the lag_bound will be either negative or positive # this let's us infer how to present the lag range lag_bound = in1_len - in2_len if lag_bound >= 0: lags = np.arange(lag_bound + 1) else: lags = np.arange(lag_bound, 1) return lags def _centered(arr, newshape): # Return the center newshape portion of the array. newshape = np.asarray(newshape) currshape = np.array(arr.shape) startind = (currshape - newshape) // 2 endind = startind + newshape myslice = [slice(startind[k], endind[k]) for k in range(len(endind))] return arr[tuple(myslice)] def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False): """Handle the axes argument for frequency-domain convolution. Returns the inputs and axes in a standard form, eliminating redundant axes, swapping the inputs if necessary, and checking for various potential errors. Parameters ---------- in1 : array First input. in2 : array Second input. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output. See the documentation `fftconvolve` for more information. axes : list of ints Axes over which to compute the FFTs. sorted_axes : bool, optional If `True`, sort the axes. Default is `False`, do not sort. Returns ------- in1 : array The first input, possible swapped with the second input. in2 : array The second input, possible swapped with the first input. axes : list of ints Axes over which to compute the FFTs. """ s1 = in1.shape s2 = in2.shape noaxes = axes is None _, axes = _init_nd_shape_and_axes(in1, shape=None, axes=axes) if not noaxes and not len(axes): raise ValueError("when provided, axes cannot be empty") # Axes of length 1 can rely on broadcasting rules for multipy, # no fft needed. 
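# Editorial sketch (not part of the library source): the lag ranges that
# `correlation_lags` above produces for a small case in each mode; the
# expected arrays follow from the arange/slicing logic in that function.
import numpy as np
from scipy import signal

assert np.array_equal(signal.correlation_lags(4, 3, mode='full'),
                      [-2, -1, 0, 1, 2, 3])
assert np.array_equal(signal.correlation_lags(4, 3, mode='same'),
                      [-1, 0, 1, 2])
assert np.array_equal(signal.correlation_lags(4, 3, mode='valid'),
                      [0, 1])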
axes = [a for a in axes if s1[a] != 1 and s2[a] != 1] if sorted_axes: axes.sort() if not all(s1[a] == s2[a] or s1[a] == 1 or s2[a] == 1 for a in range(in1.ndim) if a not in axes): raise ValueError("incompatible shapes for in1 and in2:" " {} and {}".format(s1, s2)) # Check that input sizes are compatible with 'valid' mode. if _inputs_swap_needed(mode, s1, s2, axes=axes): # Convolution is commutative; order doesn't have any effect on output. in1, in2 = in2, in1 return in1, in2, axes def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False): """Convolve two arrays in the frequency domain. This function implements only base the FFT-related operations. Specifically, it converts the signals to the frequency domain, multiplies them, then converts them back to the time domain. Calculations of axes, shapes, convolution mode, etc. are implemented in higher level-functions, such as `fftconvolve` and `oaconvolve`. Those functions should be used instead of this one. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. axes : array_like of ints Axes over which to compute the FFTs. shape : array_like of ints The sizes of the FFTs. calc_fast_len : bool, optional If `True`, set each value of `shape` to the next fast FFT length. Default is `False`, use `axes` as-is. Returns ------- out : array An N-dimensional array containing the discrete linear convolution of `in1` with `in2`. """ if not len(axes): return in1 * in2 complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c') if calc_fast_len: # Speed up FFT by padding to optimal size. fshape = [ sp_fft.next_fast_len(shape[a], not complex_result) for a in axes] else: fshape = shape if not complex_result: fft, ifft = sp_fft.rfftn, sp_fft.irfftn else: fft, ifft = sp_fft.fftn, sp_fft.ifftn sp1 = fft(in1, fshape, axes=axes) sp2 = fft(in2, fshape, axes=axes) ret = ifft(sp1 * sp2, fshape, axes=axes) if calc_fast_len: fslice = tuple([slice(sz) for sz in shape]) ret = ret[fslice] return ret def _apply_conv_mode(ret, s1, s2, mode, axes): """Calculate the convolution result shape based on the `mode` argument. Returns the result sliced to the correct size for the given mode. Parameters ---------- ret : array The result array, with the appropriate shape for the 'full' mode. s1 : list of int The shape of the first input. s2 : list of int The shape of the second input. mode : str {'full', 'valid', 'same'} A string indicating the size of the output. See the documentation `fftconvolve` for more information. axes : list of ints Axes over which to compute the convolution. Returns ------- ret : array A copy of `res`, sliced to the correct size for the given `mode`. """ if mode == "full": return ret.copy() elif mode == "same": return _centered(ret, s1).copy() elif mode == "valid": shape_valid = [ret.shape[a] if a not in axes else s1[a] - s2[a] + 1 for a in range(ret.ndim)] return _centered(ret, shape_valid).copy() else: raise ValueError("acceptable mode flags are 'valid'," " 'same', or 'full'") def fftconvolve(in1, in2, mode="full", axes=None): """Convolve two N-dimensional arrays using FFT. Convolve `in1` and `in2` using the fast Fourier transform method, with the output size determined by the `mode` argument. This is generally much faster than `convolve` for large arrays (n > ~500), but can be slower when only a few output values are needed, and can only output float arrays (int or object array inputs will be cast to float). 
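An editorial illustration (not part of the upstream docstring): a minimal check, with arbitrary small arrays, that the FFT path agrees with direct convolution up to floating-point round-off.

>>> import numpy as np
>>> from scipy import signal
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([0.0, 1.0, 0.5])
>>> np.allclose(signal.fftconvolve(a, b), signal.convolve(a, b, method='direct'))
True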
As of v0.19, `convolve` automatically chooses this method or the direct method based on an estimation of which is faster. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. axes : int or array_like of ints or None, optional Axes over which to compute the convolution. The default is over all axes. Returns ------- out : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. See Also -------- convolve : Uses the direct convolution or FFT convolution algorithm depending on which is faster. oaconvolve : Uses the overlap-add method to do convolution, which is generally faster when the input arrays are large and significantly different in size. Examples -------- Autocorrelation of white noise is an impulse. >>> import numpy as np >>> from scipy import signal >>> rng = np.random.default_rng() >>> sig = rng.standard_normal(1000) >>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full') >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) >>> ax_orig.plot(sig) >>> ax_orig.set_title('White noise') >>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr) >>> ax_mag.set_title('Autocorrelation') >>> fig.tight_layout() >>> fig.show() Gaussian blur implemented using FFT convolution. Notice the dark borders around the image, due to the zero-padding beyond its boundaries. The `convolve2d` function allows for other types of image boundaries, but is far slower. >>> from scipy import datasets >>> face = datasets.face(gray=True) >>> kernel = np.outer(signal.windows.gaussian(70, 8), ... signal.windows.gaussian(70, 8)) >>> blurred = signal.fftconvolve(face, kernel, mode='same') >>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1, ... figsize=(6, 15)) >>> ax_orig.imshow(face, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_kernel.imshow(kernel, cmap='gray') >>> ax_kernel.set_title('Gaussian kernel') >>> ax_kernel.set_axis_off() >>> ax_blurred.imshow(blurred, cmap='gray') >>> ax_blurred.set_title('Blurred') >>> ax_blurred.set_axis_off() >>> fig.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if in1.ndim == in2.ndim == 0: # scalar inputs return in1 * in2 elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") elif in1.size == 0 or in2.size == 0: # empty arrays return np.array([]) in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False) s1 = in1.shape s2 = in2.shape shape = [max((s1[i], s2[i])) if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] ret = _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True) return _apply_conv_mode(ret, s1, s2, mode, axes) def _calc_oa_lens(s1, s2): """Calculate the optimal FFT lengths for overlapp-add convolution. The calculation is done for a single dimension. Parameters ---------- s1 : int Size of the dimension for the first array. s2 : int Size of the dimension for the second array. 
Returns ------- block_size : int The size of the FFT blocks. overlap : int The amount of overlap between two blocks. in1_step : int The size of each step for the first array. in2_step : int The size of each step for the first array. """ # Set up the arguments for the conventional FFT approach. fallback = (s1+s2-1, None, s1, s2) # Use conventional FFT convolve if sizes are same. if s1 == s2 or s1 == 1 or s2 == 1: return fallback if s2 > s1: s1, s2 = s2, s1 swapped = True else: swapped = False # There cannot be a useful block size if s2 is more than half of s1. if s2 >= s1/2: return fallback # Derivation of optimal block length # For original formula see: # https://en.wikipedia.org/wiki/Overlap-add_method # # Formula: # K = overlap = s2-1 # N = block_size # C = complexity # e = exponential, exp(1) # # C = (N*(log2(N)+1))/(N-K) # C = (N*log2(2N))/(N-K) # C = N/(N-K) * log2(2N) # C1 = N/(N-K) # C2 = log2(2N) = ln(2N)/ln(2) # # dC1/dN = (1*(N-K)-N)/(N-K)^2 = -K/(N-K)^2 # dC2/dN = 2/(2*N*ln(2)) = 1/(N*ln(2)) # # dC/dN = dC1/dN*C2 + dC2/dN*C1 # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + N/(N*ln(2)*(N-K)) # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + 1/(ln(2)*(N-K)) # dC/dN = -K*ln(2N)/(ln(2)*(N-K)^2) + (N-K)/(ln(2)*(N-K)^2) # dC/dN = (-K*ln(2N) + (N-K)/(ln(2)*(N-K)^2) # dC/dN = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) # # Solve for minimum, where dC/dN = 0 # 0 = (N - K*ln(2N) - K)/(ln(2)*(N-K)^2) # 0 * ln(2)*(N-K)^2 = N - K*ln(2N) - K # 0 = N - K*ln(2N) - K # 0 = N - K*(ln(2N) + 1) # 0 = N - K*ln(2Ne) # N = K*ln(2Ne) # N/K = ln(2Ne) # # e^(N/K) = e^ln(2Ne) # e^(N/K) = 2Ne # 1/e^(N/K) = 1/(2*N*e) # e^(N/-K) = 1/(2*N*e) # e^(N/-K) = K/N*1/(2*K*e) # N/K*e^(N/-K) = 1/(2*e*K) # N/-K*e^(N/-K) = -1/(2*e*K) # # Using Lambert W function # https://en.wikipedia.org/wiki/Lambert_W_function # x = W(y) It is the solution to y = x*e^x # x = N/-K # y = -1/(2*e*K) # # N/-K = W(-1/(2*e*K)) # # N = -K*W(-1/(2*e*K)) overlap = s2-1 opt_size = -overlap*lambertw(-1/(2*math.e*overlap), k=-1).real block_size = sp_fft.next_fast_len(math.ceil(opt_size)) # Use conventional FFT convolve if there is only going to be one block. if block_size >= s1: return fallback if not swapped: in1_step = block_size-s2+1 in2_step = s2 else: in1_step = s2 in2_step = block_size-s2+1 return block_size, overlap, in1_step, in2_step def oaconvolve(in1, in2, mode="full", axes=None): """Convolve two N-dimensional arrays using the overlap-add method. Convolve `in1` and `in2` using the overlap-add method, with the output size determined by the `mode` argument. This is generally much faster than `convolve` for large arrays (n > ~500), and generally much faster than `fftconvolve` when one array is much larger than the other, but can be slower when only a few output values are needed or when the arrays are very similar in shape, and can only output float arrays (int or object array inputs will be cast to float). Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. 
axes : int or array_like of ints or None, optional Axes over which to compute the convolution. The default is over all axes. Returns ------- out : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. See Also -------- convolve : Uses the direct convolution or FFT convolution algorithm depending on which is faster. fftconvolve : An implementation of convolution using FFT. Notes ----- .. versionadded:: 1.4.0 References ---------- .. [1] Wikipedia, "Overlap-add_method". https://en.wikipedia.org/wiki/Overlap-add_method .. [2] Richard G. Lyons. Understanding Digital Signal Processing, Third Edition, 2011. Chapter 13.10. ISBN 13: 978-0137-02741-5 Examples -------- Convolve a 100,000 sample signal with a 512-sample filter. >>> import numpy as np >>> from scipy import signal >>> rng = np.random.default_rng() >>> sig = rng.standard_normal(100000) >>> filt = signal.firwin(512, 0.01) >>> fsig = signal.oaconvolve(sig, filt) >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1) >>> ax_orig.plot(sig) >>> ax_orig.set_title('White noise') >>> ax_mag.plot(fsig) >>> ax_mag.set_title('Filtered noise') >>> fig.tight_layout() >>> fig.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if in1.ndim == in2.ndim == 0: # scalar inputs return in1 * in2 elif in1.ndim != in2.ndim: raise ValueError("in1 and in2 should have the same dimensionality") elif in1.size == 0 or in2.size == 0: # empty arrays return np.array([]) elif in1.shape == in2.shape: # Equivalent to fftconvolve return fftconvolve(in1, in2, mode=mode, axes=axes) in1, in2, axes = _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=True) s1 = in1.shape s2 = in2.shape if not axes: ret = in1 * in2 return _apply_conv_mode(ret, s1, s2, mode, axes) # Calculate this now since in1 is changed later shape_final = [None if i not in axes else s1[i] + s2[i] - 1 for i in range(in1.ndim)] # Calculate the block sizes for the output, steps, first and second inputs. # It is simpler to calculate them all together than doing them in separate # loops due to all the special cases that need to be handled. optimal_sizes = ((-1, -1, s1[i], s2[i]) if i not in axes else _calc_oa_lens(s1[i], s2[i]) for i in range(in1.ndim)) block_size, overlaps, \ in1_step, in2_step = zip(*optimal_sizes) # Fall back to fftconvolve if there is only one block in every dimension. if in1_step == s1 and in2_step == s2: return fftconvolve(in1, in2, mode=mode, axes=axes) # Figure out the number of steps and padding. # This would get too complicated in a list comprehension. nsteps1 = [] nsteps2 = [] pad_size1 = [] pad_size2 = [] for i in range(in1.ndim): if i not in axes: pad_size1 += [(0, 0)] pad_size2 += [(0, 0)] continue if s1[i] > in1_step[i]: curnstep1 = math.ceil((s1[i]+1)/in1_step[i]) if (block_size[i] - overlaps[i])*curnstep1 < shape_final[i]: curnstep1 += 1 curpad1 = curnstep1*in1_step[i] - s1[i] else: curnstep1 = 1 curpad1 = 0 if s2[i] > in2_step[i]: curnstep2 = math.ceil((s2[i]+1)/in2_step[i]) if (block_size[i] - overlaps[i])*curnstep2 < shape_final[i]: curnstep2 += 1 curpad2 = curnstep2*in2_step[i] - s2[i] else: curnstep2 = 1 curpad2 = 0 nsteps1 += [curnstep1] nsteps2 += [curnstep2] pad_size1 += [(0, curpad1)] pad_size2 += [(0, curpad2)] # Pad the array to a size that can be reshaped to the desired shape # if necessary. 
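# Editorial sketch (not part of the library source): a worked instance of the
# block-length formula derived in _calc_oa_lens above, plus a check that the
# overlap-add result matches plain FFT convolution.  The sizes are arbitrary.
import math
import numpy as np
from scipy import signal
from scipy.special import lambertw

K = 127  # overlap for a 128-tap second input
N_opt = -K * lambertw(-1 / (2 * math.e * K), k=-1).real
# N_opt is roughly 1.1e3 samples; _calc_oa_lens rounds it up to a fast FFT
# length with scipy.fft.next_fast_len before splitting the long input.

rng = np.random.default_rng(0)
sig, filt = rng.standard_normal(4096), rng.standard_normal(128)
assert np.allclose(signal.oaconvolve(sig, filt), signal.fftconvolve(sig, filt))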
if not all(curpad == (0, 0) for curpad in pad_size1): in1 = np.pad(in1, pad_size1, mode='constant', constant_values=0) if not all(curpad == (0, 0) for curpad in pad_size2): in2 = np.pad(in2, pad_size2, mode='constant', constant_values=0) # Reshape the overlap-add parts to input block sizes. split_axes = [iax+i for i, iax in enumerate(axes)] fft_axes = [iax+1 for iax in split_axes] # We need to put each new dimension before the corresponding dimension # being reshaped in order to get the data in the right layout at the end. reshape_size1 = list(in1_step) reshape_size2 = list(in2_step) for i, iax in enumerate(split_axes): reshape_size1.insert(iax, nsteps1[i]) reshape_size2.insert(iax, nsteps2[i]) in1 = in1.reshape(*reshape_size1) in2 = in2.reshape(*reshape_size2) # Do the convolution. fft_shape = [block_size[i] for i in axes] ret = _freq_domain_conv(in1, in2, fft_axes, fft_shape, calc_fast_len=False) # Do the overlap-add. for ax, ax_fft, ax_split in zip(axes, fft_axes, split_axes): overlap = overlaps[ax] if overlap is None: continue ret, overpart = np.split(ret, [-overlap], ax_fft) overpart = np.split(overpart, [-1], ax_split)[0] ret_overpart = np.split(ret, [overlap], ax_fft)[0] ret_overpart = np.split(ret_overpart, [1], ax_split)[1] ret_overpart += overpart # Reshape back to the correct dimensionality. shape_ret = [ret.shape[i] if i not in fft_axes else ret.shape[i]*ret.shape[i-1] for i in range(ret.ndim) if i not in split_axes] ret = ret.reshape(*shape_ret) # Slice to the correct size. slice_final = tuple([slice(islice) for islice in shape_final]) ret = ret[slice_final] return _apply_conv_mode(ret, s1, s2, mode, axes) def _numeric_arrays(arrays, kinds='buifc'): """ See if a list of arrays are all numeric. Parameters ---------- arrays : array or list of arrays arrays to check if numeric. kinds : string-like The dtypes of the arrays to be checked. If the dtype.kind of the ndarrays are not in this string the function returns False and otherwise returns True. """ if type(arrays) == np.ndarray: return arrays.dtype.kind in kinds for array_ in arrays: if array_.dtype.kind not in kinds: return False return True def _conv_ops(x_shape, h_shape, mode): """ Find the number of operations required for direct/fft methods of convolution. The direct operations were recorded by making a dummy class to record the number of operations by overriding ``__mul__`` and ``__add__``. The FFT operations rely on the (well-known) computational complexity of the FFT (and the implementation of ``_freq_domain_conv``). 
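A doctest-style sketch (an editorial addition; `_conv_ops` is a private helper of this module): for a 1-D 'full' convolution of a length-1000 signal with a length-10 kernel the direct estimate is simply ``1000 * 10`` multiply-adds, while the FFT estimate ``3 * N * log(N)`` with ``N = 1009`` comes out roughly twice as large; `_fftconv_faster` then weights both counts with machine-tuned constants before a method is chosen.

>>> from scipy.signal._signaltools import _conv_ops
>>> fft_ops, direct_ops = _conv_ops((1000,), (10,), 'full')
>>> direct_ops
10000
>>> bool(fft_ops > direct_ops)
True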
""" if mode == "full": out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] elif mode == "valid": out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)] elif mode == "same": out_shape = x_shape else: raise ValueError("Acceptable mode flags are 'valid'," " 'same', or 'full', not mode={}".format(mode)) s1, s2 = x_shape, h_shape if len(x_shape) == 1: s1, s2 = s1[0], s2[0] if mode == "full": direct_ops = s1 * s2 elif mode == "valid": direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2 elif mode == "same": direct_ops = (s1 * s2 if s1 < s2 else s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)) else: if mode == "full": direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) elif mode == "valid": direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape) elif mode == "same": direct_ops = _prod(s1) * _prod(s2) full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)] N = _prod(full_out_shape) fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape return fft_ops, direct_ops def _fftconv_faster(x, h, mode): """ See if using fftconvolve or convolve is faster. Parameters ---------- x : np.ndarray Signal h : np.ndarray Kernel mode : str Mode passed to convolve Returns ------- fft_faster : bool Notes ----- See docstring of `choose_conv_method` for details on tuning hardware. See pull request 11031 for more detail: https://github.com/scipy/scipy/pull/11031. """ fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode) offset = -1e-3 if x.ndim == 1 else -1e-4 constants = { "valid": (1.89095737e-9, 2.1364985e-10, offset), "full": (1.7649070e-9, 2.1414831e-10, offset), "same": (3.2646654e-9, 2.8478277e-10, offset) if h.size <= x.size else (3.21635404e-9, 1.1773253e-8, -1e-5), } if x.ndim == 1 else { "valid": (1.85927e-9, 2.11242e-8, offset), "full": (1.99817e-9, 1.66174e-8, offset), "same": (2.04735e-9, 1.55367e-8, offset), } O_fft, O_direct, O_offset = constants[mode] return O_fft * fft_ops < O_direct * direct_ops + O_offset def _reverse_and_conj(x): """ Reverse array `x` in all dimensions and perform the complex conjugate """ reverse = (slice(None, None, -1),) * x.ndim return x[reverse].conj() def _np_conv_ok(volume, kernel, mode): """ See if numpy supports convolution of `volume` and `kernel` (i.e. both are 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the size of the larger input, while SciPy's uses the size of the first input. Invalid mode strings will return False and be caught by the calling func. """ if volume.ndim == kernel.ndim == 1: if mode in ('full', 'valid'): return True elif mode == 'same': return volume.size >= kernel.size else: return False def _timeit_fast(stmt="pass", setup="pass", repeat=3): """ Returns the time the statement/function took, in seconds. Faster, less precise version of IPython's timeit. `stmt` can be a statement written as a string or a callable. Will do only 1 loop (like IPython's timeit) with no repetitions (unlike IPython) for very slow functions. For fast functions, only does enough loops to take 5 ms, which seems to produce similar results (on Windows at least), and avoids doing an extraneous cycle that isn't measured. 
""" timer = timeit.Timer(stmt, setup) # determine number of calls per rep so total time for 1 rep >= 5 ms x = 0 for p in range(0, 10): number = 10**p x = timer.timeit(number) # seconds if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one break if x > 1: # second # If it's macroscopic, don't bother with repetitions best = x else: number *= 10 r = timer.repeat(repeat, number) best = min(r) sec = best / number return sec def choose_conv_method(in1, in2, mode='full', measure=False): """ Find the fastest convolution/correlation method. This primarily exists to be called during the ``method='auto'`` option in `convolve` and `correlate`. It can also be used to determine the value of ``method`` for many different convolutions of the same dtype/shape. In addition, it supports timing the convolution to adapt the value of ``method`` to a particular set of inputs and/or hardware. Parameters ---------- in1 : array_like The first argument passed into the convolution function. in2 : array_like The second argument passed into the convolution function. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. measure : bool, optional If True, run and time the convolution of `in1` and `in2` with both methods and return the fastest. If False (default), predict the fastest method using precomputed values. Returns ------- method : str A string indicating which convolution method is fastest, either 'direct' or 'fft' times : dict, optional A dictionary containing the times (in seconds) needed for each method. This value is only returned if ``measure=True``. See Also -------- convolve correlate Notes ----- Generally, this method is 99% accurate for 2D signals and 85% accurate for 1D signals for randomly chosen input sizes. For precision, use ``measure=True`` to find the fastest method by timing the convolution. This can be used to avoid the minimal overhead of finding the fastest ``method`` later, or to adapt the value of ``method`` to a particular set of inputs. Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this function. These experiments measured the ratio between the time required when using ``method='auto'`` and the time required for the fastest method (i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these experiments, we found: * There is a 95% chance of this ratio being less than 1.5 for 1D signals and a 99% chance of being less than 2.5 for 2D signals. * The ratio was always less than 2.5/5 for 1D/2D signals respectively. * This function is most inaccurate for 1D convolutions that take between 1 and 10 milliseconds with ``method='direct'``. A good proxy for this (at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``. The 2D results almost certainly generalize to 3D/4D/etc because the implementation is the same (the 1D implementation is different). All the numbers above are specific to the EC2 machine. However, we did find that this function generalizes fairly decently across hardware. The speed tests were of similar quality (and even slightly better) than the same tests performed on the machine to tune this function's numbers (a mid-2014 15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor). 
There are cases when `fftconvolve` supports the inputs but this function returns `direct` (e.g., to protect against floating point integer precision). .. versionadded:: 0.19 Examples -------- Estimate the fastest method for a given input: >>> import numpy as np >>> from scipy import signal >>> rng = np.random.default_rng() >>> img = rng.random((32, 32)) >>> filter = rng.random((8, 8)) >>> method = signal.choose_conv_method(img, filter, mode='same') >>> method 'fft' This can then be applied to other arrays of the same dtype and shape: >>> img2 = rng.random((32, 32)) >>> filter2 = rng.random((8, 8)) >>> corr2 = signal.correlate(img2, filter2, mode='same', method=method) >>> conv2 = signal.convolve(img2, filter2, mode='same', method=method) The output of this function (``method``) works with `correlate` and `convolve`. """ volume = np.asarray(in1) kernel = np.asarray(in2) if measure: times = {} for method in ['fft', 'direct']: times[method] = _timeit_fast(lambda: convolve(volume, kernel, mode=mode, method=method)) chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct' return chosen_method, times # for integer input, # catch when more precision required than float provides (representing an # integer as float can lose precision in fftconvolve if larger than 2**52) if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]): max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max()) max_value *= int(min(volume.size, kernel.size)) if max_value > 2**np.finfo('float').nmant - 1: return 'direct' if _numeric_arrays([volume, kernel], kinds='b'): return 'direct' if _numeric_arrays([volume, kernel]): if _fftconv_faster(volume, kernel, mode): return 'fft' return 'direct' def convolve(in1, in2, mode='full', method='auto'): """ Convolve two N-dimensional arrays. Convolve `in1` and `in2`, with the output size determined by the `mode` argument. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. method : str {'auto', 'direct', 'fft'}, optional A string indicating which method to use to calculate the convolution. ``direct`` The convolution is determined directly from sums, the definition of convolution. ``fft`` The Fourier Transform is used to perform the convolution by calling `fftconvolve`. ``auto`` Automatically chooses direct or Fourier method based on an estimate of which is faster (default). See Notes for more detail. .. versionadded:: 0.19.0 Returns ------- convolve : array An N-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. Warns ----- RuntimeWarning Use of the FFT convolution on input containing NAN or INF will lead to the entire output being NAN or INF. Use method='direct' when your input contains NAN or INF values. See Also -------- numpy.polymul : performs polynomial multiplication (same operation, but also accepts poly1d objects) choose_conv_method : chooses the fastest appropriate convolution method fftconvolve : Always uses the FFT method. 
oaconvolve : Uses the overlap-add method to do convolution, which is generally faster when the input arrays are large and significantly different in size. Notes ----- By default, `convolve` and `correlate` use ``method='auto'``, which calls `choose_conv_method` to choose the fastest method using pre-computed values (`choose_conv_method` can also measure real-world timing with a keyword argument). Because `fftconvolve` relies on floating point numbers, there are certain constraints that may force `method=direct` (more detail in `choose_conv_method` docstring). Examples -------- Smooth a square pulse using a Hann window: >>> import numpy as np >>> from scipy import signal >>> sig = np.repeat([0., 1., 0.], 100) >>> win = signal.windows.hann(50) >>> filtered = signal.convolve(sig, win, mode='same') / sum(win) >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True) >>> ax_orig.plot(sig) >>> ax_orig.set_title('Original pulse') >>> ax_orig.margins(0, 0.1) >>> ax_win.plot(win) >>> ax_win.set_title('Filter impulse response') >>> ax_win.margins(0, 0.1) >>> ax_filt.plot(filtered) >>> ax_filt.set_title('Filtered signal') >>> ax_filt.margins(0, 0.1) >>> fig.tight_layout() >>> fig.show() """ volume = np.asarray(in1) kernel = np.asarray(in2) if volume.ndim == kernel.ndim == 0: return volume * kernel elif volume.ndim != kernel.ndim: raise ValueError("volume and kernel should have the same " "dimensionality") if _inputs_swap_needed(mode, volume.shape, kernel.shape): # Convolution is commutative; order doesn't have any effect on output volume, kernel = kernel, volume if method == 'auto': method = choose_conv_method(volume, kernel, mode=mode) if method == 'fft': out = fftconvolve(volume, kernel, mode=mode) result_type = np.result_type(volume, kernel) if result_type.kind in {'u', 'i'}: out = np.around(out) if np.isnan(out.flat[0]) or np.isinf(out.flat[0]): warnings.warn("Use of fft convolution on input with NAN or inf" " results in NAN or inf output. Consider using" " method='direct' instead.", category=RuntimeWarning, stacklevel=2) return out.astype(result_type) elif method == 'direct': # fastpath to faster numpy.convolve for 1d inputs when possible if _np_conv_ok(volume, kernel, mode): return np.convolve(volume, kernel, mode) return correlate(volume, _reverse_and_conj(kernel), mode, 'direct') else: raise ValueError("Acceptable method flags are 'auto'," " 'direct', or 'fft'.") def order_filter(a, domain, rank): """ Perform an order filter on an N-D array. Perform an order filter on the array in. The domain argument acts as a mask centered over each pixel. The non-zero elements of domain are used to select elements surrounding each input pixel which are placed in a list. The list is sorted, and the output for that pixel is the element corresponding to rank in the sorted list. Parameters ---------- a : ndarray The N-dimensional input array. domain : array_like A mask array with the same number of dimensions as `a`. Each dimension should have an odd number of elements. rank : int A non-negative integer which selects the element from the sorted list (0 corresponds to the smallest element, 1 is the next smallest element, etc.). Returns ------- out : ndarray The results of the order filter in an array with the same shape as `a`. 
Examples -------- >>> import numpy as np >>> from scipy import signal >>> x = np.arange(25).reshape(5, 5) >>> domain = np.identity(3) >>> x array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) >>> signal.order_filter(x, domain, 0) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 2., 0.], [ 0., 5., 6., 7., 0.], [ 0., 10., 11., 12., 0.], [ 0., 0., 0., 0., 0.]]) >>> signal.order_filter(x, domain, 2) array([[ 6., 7., 8., 9., 4.], [ 11., 12., 13., 14., 9.], [ 16., 17., 18., 19., 14.], [ 21., 22., 23., 24., 19.], [ 20., 21., 22., 23., 24.]]) """ domain = np.asarray(domain) for dimsize in domain.shape: if (dimsize % 2) != 1: raise ValueError("Each dimension of domain argument " "should have an odd number of elements.") a = np.asarray(a) if a.dtype in [object, 'float128']: mesg = (f"Using order_filter with arrays of dtype {a.dtype} is " f"deprecated in SciPy 1.11 and will be removed in SciPy 1.13") warnings.warn(mesg, DeprecationWarning, stacklevel=2) result = _sigtools._order_filterND(a, domain, rank) else: result = ndimage.rank_filter(a, rank, footprint=domain, mode='constant') return result def medfilt(volume, kernel_size=None): """ Perform a median filter on an N-dimensional array. Apply a median filter to the input array using a local window-size given by `kernel_size`. The array will automatically be zero-padded. Parameters ---------- volume : array_like An N-dimensional input array. kernel_size : array_like, optional A scalar or an N-length list giving the size of the median filter window in each dimension. Elements of `kernel_size` should be odd. If `kernel_size` is a scalar, then this scalar is used as the size in each dimension. Default size is 3 for each dimension. Returns ------- out : ndarray An array the same size as input containing the median filtered result. Warns ----- UserWarning If array size is smaller than kernel size along any dimension See Also -------- scipy.ndimage.median_filter scipy.signal.medfilt2d Notes ----- The more general function `scipy.ndimage.median_filter` has a more efficient implementation of a median filter and therefore runs much faster. For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes, the specialised function `scipy.signal.medfilt2d` may be faster. """ volume = np.atleast_1d(volume) if kernel_size is None: kernel_size = [3] * volume.ndim kernel_size = np.asarray(kernel_size) if kernel_size.shape == (): kernel_size = np.repeat(kernel_size.item(), volume.ndim) for k in range(volume.ndim): if (kernel_size[k] % 2) != 1: raise ValueError("Each element of kernel_size should be odd.") if any(k > s for k, s in zip(kernel_size, volume.shape)): warnings.warn('kernel_size exceeds volume extent: the volume will be ' 'zero-padded.') domain = np.ones(kernel_size, dtype=volume.dtype) numels = np.prod(kernel_size, axis=0) order = numels // 2 if volume.dtype in [np.bool_, np.cfloat, np.cdouble, np.clongdouble, np.float16]: raise ValueError(f"dtype={volume.dtype} is not supported by medfilt") if volume.dtype.char in ['O', 'g']: mesg = (f"Using medfilt with arrays of dtype {volume.dtype} is " f"deprecated in SciPy 1.11 and will be removed in SciPy 1.13") warnings.warn(mesg, DeprecationWarning, stacklevel=2) result = _sigtools._order_filterND(volume, domain, order) else: size = math.prod(kernel_size) result = ndimage.rank_filter(volume, size // 2, size=kernel_size, mode='constant') return result def wiener(im, mysize=None, noise=None): """ Perform a Wiener filter on an N-dimensional array. 
Apply a Wiener filter to the N-dimensional array `im`. Parameters ---------- im : ndarray An N-dimensional array. mysize : int or array_like, optional A scalar or an N-length list giving the size of the Wiener filter window in each dimension. Elements of mysize should be odd. If mysize is a scalar, then this scalar is used as the size in each dimension. noise : float, optional The noise-power to use. If None, then noise is estimated as the average of the local variance of the input. Returns ------- out : ndarray Wiener filtered result with the same shape as `im`. Notes ----- This implementation is similar to wiener2 in Matlab/Octave. For more details see [1]_ References ---------- .. [1] Lim, Jae S., Two-Dimensional Signal and Image Processing, Englewood Cliffs, NJ, Prentice Hall, 1990, p. 548. Examples -------- >>> from scipy.datasets import face >>> from scipy.signal import wiener >>> import matplotlib.pyplot as plt >>> import numpy as np >>> rng = np.random.default_rng() >>> img = rng.random((40, 40)) #Create a random image >>> filtered_img = wiener(img, (5, 5)) #Filter the image >>> f, (plot1, plot2) = plt.subplots(1, 2) >>> plot1.imshow(img) >>> plot2.imshow(filtered_img) >>> plt.show() """ im = np.asarray(im) if mysize is None: mysize = [3] * im.ndim mysize = np.asarray(mysize) if mysize.shape == (): mysize = np.repeat(mysize.item(), im.ndim) # Estimate the local mean lMean = correlate(im, np.ones(mysize), 'same') / np.prod(mysize, axis=0) # Estimate the local variance lVar = (correlate(im ** 2, np.ones(mysize), 'same') / np.prod(mysize, axis=0) - lMean ** 2) # Estimate the noise power if needed. if noise is None: noise = np.mean(np.ravel(lVar), axis=0) res = (im - lMean) res *= (1 - noise / lVar) res += lMean out = np.where(lVar < noise, lMean, res) return out def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0): """ Convolve two 2-dimensional arrays. Convolve `in1` and `in2` with output size determined by `mode`, and boundary conditions determined by `boundary` and `fillvalue`. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear convolution of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. boundary : str {'fill', 'wrap', 'symm'}, optional A flag indicating how to handle boundaries: ``fill`` pad input arrays with fillvalue. (default) ``wrap`` circular boundary conditions. ``symm`` symmetrical boundary conditions. fillvalue : scalar, optional Value to fill pad input arrays with. Default is 0. Returns ------- out : ndarray A 2-dimensional array containing a subset of the discrete linear convolution of `in1` with `in2`. Examples -------- Compute the gradient of an image by 2D convolution with a complex Scharr operator. (Horizontal operator is real, vertical is imaginary.) Use symmetric boundary condition to avoid creating edges at the image boundaries. >>> import numpy as np >>> from scipy import signal >>> from scipy import datasets >>> ascent = datasets.ascent() >>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j], ... [-10+0j, 0+ 0j, +10 +0j], ... 
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy >>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same') >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15)) >>> ax_orig.imshow(ascent, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_mag.imshow(np.absolute(grad), cmap='gray') >>> ax_mag.set_title('Gradient magnitude') >>> ax_mag.set_axis_off() >>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles >>> ax_ang.set_title('Gradient orientation') >>> ax_ang.set_axis_off() >>> fig.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if not in1.ndim == in2.ndim == 2: raise ValueError('convolve2d inputs must both be 2-D arrays') if _inputs_swap_needed(mode, in1.shape, in2.shape): in1, in2 = in2, in1 val = _valfrommode(mode) bval = _bvalfromboundary(boundary) out = _sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue) return out def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0): """ Cross-correlate two 2-dimensional arrays. Cross correlate `in1` and `in2` with output size determined by `mode`, and boundary conditions determined by `boundary` and `fillvalue`. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as `in1`. mode : str {'full', 'valid', 'same'}, optional A string indicating the size of the output: ``full`` The output is the full discrete linear cross-correlation of the inputs. (Default) ``valid`` The output consists only of those elements that do not rely on the zero-padding. In 'valid' mode, either `in1` or `in2` must be at least as large as the other in every dimension. ``same`` The output is the same size as `in1`, centered with respect to the 'full' output. boundary : str {'fill', 'wrap', 'symm'}, optional A flag indicating how to handle boundaries: ``fill`` pad input arrays with fillvalue. (default) ``wrap`` circular boundary conditions. ``symm`` symmetrical boundary conditions. fillvalue : scalar, optional Value to fill pad input arrays with. Default is 0. Returns ------- correlate2d : ndarray A 2-dimensional array containing a subset of the discrete linear cross-correlation of `in1` with `in2`. Notes ----- When using "same" mode with even-length inputs, the outputs of `correlate` and `correlate2d` differ: There is a 1-index offset between them. Examples -------- Use 2D cross-correlation to find the location of a template in a noisy image: >>> import numpy as np >>> from scipy import signal >>> from scipy import datasets >>> rng = np.random.default_rng() >>> face = datasets.face(gray=True) - datasets.face(gray=True).mean() >>> template = np.copy(face[300:365, 670:750]) # right eye >>> template -= template.mean() >>> face = face + rng.standard_normal(face.shape) * 50 # add noise >>> corr = signal.correlate2d(face, template, boundary='symm', mode='same') >>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match >>> import matplotlib.pyplot as plt >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1, ... 
figsize=(6, 15)) >>> ax_orig.imshow(face, cmap='gray') >>> ax_orig.set_title('Original') >>> ax_orig.set_axis_off() >>> ax_template.imshow(template, cmap='gray') >>> ax_template.set_title('Template') >>> ax_template.set_axis_off() >>> ax_corr.imshow(corr, cmap='gray') >>> ax_corr.set_title('Cross-correlation') >>> ax_corr.set_axis_off() >>> ax_orig.plot(x, y, 'ro') >>> fig.show() """ in1 = np.asarray(in1) in2 = np.asarray(in2) if not in1.ndim == in2.ndim == 2: raise ValueError('correlate2d inputs must both be 2-D arrays') swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape) if swapped_inputs: in1, in2 = in2, in1 val = _valfrommode(mode) bval = _bvalfromboundary(boundary) out = _sigtools._convolve2d(in1, in2.conj(), 0, val, bval, fillvalue) if swapped_inputs: out = out[::-1, ::-1] return out def medfilt2d(input, kernel_size=3): """ Median filter a 2-dimensional array. Apply a median filter to the `input` array using a local window-size given by `kernel_size` (must be odd). The array is zero-padded automatically. Parameters ---------- input : array_like A 2-dimensional input array. kernel_size : array_like, optional A scalar or a list of length 2, giving the size of the median filter window in each dimension. Elements of `kernel_size` should be odd. If `kernel_size` is a scalar, then this scalar is used as the size in each dimension. Default is a kernel of size (3, 3). Returns ------- out : ndarray An array the same size as input containing the median filtered result. See Also -------- scipy.ndimage.median_filter Notes ----- This is faster than `medfilt` when the input dtype is ``uint8``, ``float32``, or ``float64``; for other types, this falls back to `medfilt`. In some situations, `scipy.ndimage.median_filter` may be faster than this function. Examples -------- >>> import numpy as np >>> from scipy import signal >>> x = np.arange(25).reshape(5, 5) >>> x array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) # Replaces i,j with the median out of 5*5 window >>> signal.medfilt2d(x, kernel_size=5) array([[ 0, 0, 2, 0, 0], [ 0, 3, 7, 4, 0], [ 2, 8, 12, 9, 4], [ 0, 8, 12, 9, 0], [ 0, 0, 12, 0, 0]]) # Replaces i,j with the median out of default 3*3 window >>> signal.medfilt2d(x) array([[ 0, 1, 2, 3, 0], [ 1, 6, 7, 8, 4], [ 6, 11, 12, 13, 9], [11, 16, 17, 18, 14], [ 0, 16, 17, 18, 0]]) # Replaces i,j with the median out of default 5*3 window >>> signal.medfilt2d(x, kernel_size=[5,3]) array([[ 0, 1, 2, 3, 0], [ 0, 6, 7, 8, 3], [ 5, 11, 12, 13, 8], [ 5, 11, 12, 13, 8], [ 0, 11, 12, 13, 0]]) # Replaces i,j with the median out of default 3*5 window >>> signal.medfilt2d(x, kernel_size=[3,5]) array([[ 0, 0, 2, 1, 0], [ 1, 5, 7, 6, 3], [ 6, 10, 12, 11, 8], [11, 15, 17, 16, 13], [ 0, 15, 17, 16, 0]]) # As seen in the examples, # kernel numbers must be odd and not exceed original array dim """ image = np.asarray(input) # checking dtype.type, rather than just dtype, is necessary for # excluding np.longdouble with MS Visual C. if image.dtype.type not in (np.ubyte, np.single, np.double): return medfilt(image, kernel_size) if kernel_size is None: kernel_size = [3] * 2 kernel_size = np.asarray(kernel_size) if kernel_size.shape == (): kernel_size = np.repeat(kernel_size.item(), 2) for size in kernel_size: if (size % 2) != 1: raise ValueError("Each element of kernel_size should be odd.") return _sigtools._medfilt2d(image, kernel_size) def lfilter(b, a, x, axis=-1, zi=None): """ Filter data along one-dimension with an IIR or FIR filter. 
Filter a data sequence, `x`, using a digital filter. This works for many fundamental data types (including Object type). The filter is a direct form II transposed implementation of the standard difference equation (see Notes). The function `sosfilt` (and filter design using ``output='sos'``) should be preferred over `lfilter` for most filtering tasks, as second-order sections have fewer numerical problems. Parameters ---------- b : array_like The numerator coefficient vector in a 1-D sequence. a : array_like The denominator coefficient vector in a 1-D sequence. If ``a[0]`` is not 1, then both `a` and `b` are normalized by ``a[0]``. x : array_like An N-dimensional input array. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. zi : array_like, optional Initial conditions for the filter delays. It is a vector (or array of vectors for an N-dimensional input) of length ``max(len(a), len(b)) - 1``. If `zi` is None or is not given then initial rest is assumed. See `lfiltic` for more information. Returns ------- y : array The output of the digital filter. zf : array, optional If `zi` is None, this is not returned, otherwise, `zf` holds the final filter delay values. See Also -------- lfiltic : Construct initial conditions for `lfilter`. lfilter_zi : Compute initial state (steady state of step response) for `lfilter`. filtfilt : A forward-backward filter, to obtain a filter with zero phase. savgol_filter : A Savitzky-Golay filter. sosfilt: Filter data using cascaded second-order sections. sosfiltfilt: A forward-backward filter using second-order sections. Notes ----- The filter function is implemented as a direct II transposed structure. This means that the filter implements:: a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M] - a[1]*y[n-1] - ... - a[N]*y[n-N] where `M` is the degree of the numerator, `N` is the degree of the denominator, and `n` is the sample number. It is implemented using the following difference equations (assuming M = N):: a[0]*y[n] = b[0] * x[n] + d[0][n-1] d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1] ... d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1] d[N-1][n] = b[N] * x[n] - a[N] * y[n] where `d` are the state variables. The rational transfer function describing this filter in the z-transform domain is:: -1 -M b[0] + b[1]z + ... + b[M] z Y(z) = -------------------------------- X(z) -1 -N a[0] + a[1]z + ... + a[N] z Examples -------- Generate a noisy signal to be filtered: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() >>> t = np.linspace(-1, 1, 201) >>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) + ... 0.1*np.sin(2*np.pi*1.25*t + 1) + ... 0.18*np.cos(2*np.pi*3.85*t)) >>> xn = x + rng.standard_normal(len(t)) * 0.08 Create an order 3 lowpass butterworth filter: >>> b, a = signal.butter(3, 0.05) Apply the filter to xn. 
Use lfilter_zi to choose the initial condition of the filter: >>> zi = signal.lfilter_zi(b, a) >>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0]) Apply the filter again, to have a result filtered at an order the same as filtfilt: >>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0]) Use filtfilt to apply the filter: >>> y = signal.filtfilt(b, a, xn) Plot the original signal and the various filtered versions: >>> plt.figure >>> plt.plot(t, xn, 'b', alpha=0.75) >>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k') >>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice', ... 'filtfilt'), loc='best') >>> plt.grid(True) >>> plt.show() """ a = np.atleast_1d(a) if len(a) == 1: # This path only supports types fdgFDGO to mirror _linear_filter below. # Any of b, a, x, or zi can set the dtype, but there is no default # casting of other types; instead a NotImplementedError is raised. b = np.asarray(b) a = np.asarray(a) if b.ndim != 1 and a.ndim != 1: raise ValueError('object of too small depth for desired array') x = _validate_x(x) inputs = [b, a, x] if zi is not None: # _linear_filter does not broadcast zi, but does do expansion of # singleton dims. zi = np.asarray(zi) if zi.ndim != x.ndim: raise ValueError('object of too small depth for desired array') expected_shape = list(x.shape) expected_shape[axis] = b.shape[0] - 1 expected_shape = tuple(expected_shape) # check the trivial case where zi is the right shape first if zi.shape != expected_shape: strides = zi.ndim * [None] if axis < 0: axis += zi.ndim for k in range(zi.ndim): if k == axis and zi.shape[k] == expected_shape[k]: strides[k] = zi.strides[k] elif k != axis and zi.shape[k] == expected_shape[k]: strides[k] = zi.strides[k] elif k != axis and zi.shape[k] == 1: strides[k] = 0 else: raise ValueError('Unexpected shape for zi: expected ' '%s, found %s.' % (expected_shape, zi.shape)) zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides) inputs.append(zi) dtype = np.result_type(*inputs) if dtype.char not in 'fdgFDGO': raise NotImplementedError("input type '%s' not supported" % dtype) b = np.array(b, dtype=dtype) a = np.array(a, dtype=dtype, copy=False) b /= a[0] x = np.array(x, dtype=dtype, copy=False) out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x) ind = out_full.ndim * [slice(None)] if zi is not None: ind[axis] = slice(zi.shape[axis]) out_full[tuple(ind)] += zi ind[axis] = slice(out_full.shape[axis] - len(b) + 1) out = out_full[tuple(ind)] if zi is None: return out else: ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None) zf = out_full[tuple(ind)] return out, zf else: if zi is None: return _sigtools._linear_filter(b, a, x, axis) else: return _sigtools._linear_filter(b, a, x, axis, zi) def lfiltic(b, a, y, x=None): """ Construct initial conditions for lfilter given input and output vectors. Given a linear filter (b, a) and initial conditions on the output `y` and the input `x`, return the initial conditions on the state vector zi which is used by `lfilter` to generate the output given the input. Parameters ---------- b : array_like Linear filter term. a : array_like Linear filter term. y : array_like Initial conditions. If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``. If `y` is too short, it is padded with zeros. x : array_like, optional Initial conditions. If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``. If `x` is not given, its initial conditions are assumed zero. If `x` is too short, it is padded with zeros. 
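A brief editorial sketch (not part of the upstream docstring) of the use described above: passing the most recent outputs and inputs of a previous `lfilter` run to `lfiltic`, newest first, produces a ``zi`` with which filtering continues exactly as if the signal had been processed in one pass.

>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.butter(2, 0.25)
>>> x = np.arange(10.0)
>>> y_full = signal.lfilter(b, a, x)
>>> y_head = signal.lfilter(b, a, x[:5])
>>> zi = signal.lfiltic(b, a, y_head[::-1], x[4::-1])
>>> y_tail, _ = signal.lfilter(b, a, x[5:], zi=zi)
>>> np.allclose(y_full, np.concatenate([y_head, y_tail]))
True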
Returns ------- zi : ndarray The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M, N)``. See Also -------- lfilter, lfilter_zi """ N = np.size(a) - 1 M = np.size(b) - 1 K = max(M, N) y = np.asarray(y) if x is None: result_type = np.result_type(np.asarray(b), np.asarray(a), y) if result_type.kind in 'bui': result_type = np.float64 x = np.zeros(M, dtype=result_type) else: x = np.asarray(x) result_type = np.result_type(np.asarray(b), np.asarray(a), y, x) if result_type.kind in 'bui': result_type = np.float64 x = x.astype(result_type) L = np.size(x) if L < M: x = np.r_[x, np.zeros(M - L)] y = y.astype(result_type) zi = np.zeros(K, result_type) L = np.size(y) if L < N: y = np.r_[y, np.zeros(N - L)] for m in range(M): zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0) for m in range(N): zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0) return zi def deconvolve(signal, divisor): """Deconvolves ``divisor`` out of ``signal`` using inverse filtering. Returns the quotient and remainder such that ``signal = convolve(divisor, quotient) + remainder`` Parameters ---------- signal : (N,) array_like Signal data, typically a recorded signal divisor : (N,) array_like Divisor data, typically an impulse response or filter that was applied to the original signal Returns ------- quotient : ndarray Quotient, typically the recovered original signal remainder : ndarray Remainder See Also -------- numpy.polydiv : performs polynomial division (same operation, but also accepts poly1d objects) Examples -------- Deconvolve a signal that's been filtered: >>> from scipy import signal >>> original = [0, 1, 0, 0, 1, 1, 0, 0] >>> impulse_response = [2, 1] >>> recorded = signal.convolve(impulse_response, original) >>> recorded array([0, 2, 1, 0, 2, 3, 1, 0, 0]) >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) >>> recovered array([ 0., 1., 0., 0., 1., 1., 0., 0.]) """ num = np.atleast_1d(signal) den = np.atleast_1d(divisor) if num.ndim > 1: raise ValueError("signal must be 1-D.") if den.ndim > 1: raise ValueError("divisor must be 1-D.") N = len(num) D = len(den) if D > N: quot = [] rem = num else: input = np.zeros(N - D + 1, float) input[0] = 1 quot = lfilter(num, den, input) rem = num - convolve(den, quot, mode='full') return quot, rem def hilbert(x, N=None, axis=-1): """ Compute the analytic signal, using the Hilbert transform. The transformation is done along the last axis by default. Parameters ---------- x : array_like Signal data. Must be real. N : int, optional Number of Fourier components. Default: ``x.shape[axis]`` axis : int, optional Axis along which to do the transformation. Default: -1. Returns ------- xa : ndarray Analytic signal of `x`, of each 1-D array along `axis` Notes ----- The analytic signal ``x_a(t)`` of signal ``x(t)`` is: .. math:: x_a = F^{-1}(F(x) 2U) = x + i y where `F` is the Fourier transform, `U` the unit step function, and `y` the Hilbert transform of `x`. [1]_ In other words, the negative half of the frequency spectrum is zeroed out, turning the real-valued signal into a complex signal. The Hilbert transformed signal can be obtained from ``np.imag(hilbert(x))``, and the original signal from ``np.real(hilbert(x))``. References ---------- .. [1] Wikipedia, "Analytic signal". https://en.wikipedia.org/wiki/Analytic_signal .. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2. .. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing, Third Edition, 2009. Chapter 12. 
ISBN 13: 978-1292-02572-8 Examples -------- In this example we use the Hilbert transform to determine the amplitude envelope and instantaneous frequency of an amplitude-modulated signal. >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import hilbert, chirp >>> duration = 1.0 >>> fs = 400.0 >>> samples = int(fs*duration) >>> t = np.arange(samples) / fs We create a chirp of which the frequency increases from 20 Hz to 100 Hz and apply an amplitude modulation. >>> signal = chirp(t, 20.0, t[-1], 100.0) >>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) ) The amplitude envelope is given by magnitude of the analytic signal. The instantaneous frequency can be obtained by differentiating the instantaneous phase in respect to time. The instantaneous phase corresponds to the phase angle of the analytic signal. >>> analytic_signal = hilbert(signal) >>> amplitude_envelope = np.abs(analytic_signal) >>> instantaneous_phase = np.unwrap(np.angle(analytic_signal)) >>> instantaneous_frequency = (np.diff(instantaneous_phase) / ... (2.0*np.pi) * fs) >>> fig, (ax0, ax1) = plt.subplots(nrows=2) >>> ax0.plot(t, signal, label='signal') >>> ax0.plot(t, amplitude_envelope, label='envelope') >>> ax0.set_xlabel("time in seconds") >>> ax0.legend() >>> ax1.plot(t[1:], instantaneous_frequency) >>> ax1.set_xlabel("time in seconds") >>> ax1.set_ylim(0.0, 120.0) >>> fig.tight_layout() """ x = np.asarray(x) if np.iscomplexobj(x): raise ValueError("x must be real.") if N is None: N = x.shape[axis] if N <= 0: raise ValueError("N must be positive.") Xf = sp_fft.fft(x, N, axis=axis) h = np.zeros(N, dtype=Xf.dtype) if N % 2 == 0: h[0] = h[N // 2] = 1 h[1:N // 2] = 2 else: h[0] = 1 h[1:(N + 1) // 2] = 2 if x.ndim > 1: ind = [np.newaxis] * x.ndim ind[axis] = slice(None) h = h[tuple(ind)] x = sp_fft.ifft(Xf * h, axis=axis) return x def hilbert2(x, N=None): """ Compute the '2-D' analytic signal of `x` Parameters ---------- x : array_like 2-D signal data. N : int or tuple of two ints, optional Number of Fourier components. Default is ``x.shape`` Returns ------- xa : ndarray Analytic signal of `x` taken along axes (0,1). References ---------- .. [1] Wikipedia, "Analytic signal", https://en.wikipedia.org/wiki/Analytic_signal """ x = np.atleast_2d(x) if x.ndim > 2: raise ValueError("x must be 2-D.") if np.iscomplexobj(x): raise ValueError("x must be real.") if N is None: N = x.shape elif isinstance(N, int): if N <= 0: raise ValueError("N must be positive.") N = (N, N) elif len(N) != 2 or np.any(np.asarray(N) <= 0): raise ValueError("When given as a tuple, N must hold exactly " "two positive integers") Xf = sp_fft.fft2(x, N, axes=(0, 1)) h1 = np.zeros(N[0], dtype=Xf.dtype) h2 = np.zeros(N[1], dtype=Xf.dtype) for h in (h1, h2): N1 = h.shape[0] if N1 % 2 == 0: h[0] = h[N1 // 2] = 1 h[1:N1 // 2] = 2 else: h[0] = 1 h[1:(N1 + 1) // 2] = 2 h = h1[:, np.newaxis] * h2[np.newaxis, :] k = x.ndim while k > 2: h = h[:, np.newaxis] k -= 1 x = sp_fft.ifft2(Xf * h, axes=(0, 1)) return x _msg_cplx_sort="""cmplx_sort is deprecated in SciPy 1.12 and will be removed in SciPy 1.14. The exact equivalent for a numpy array argument is >>> def cmplx_sort(p): ... idx = np.argsort(abs(p)) ... return np.take(p, idx, 0), idx """ def cmplx_sort(p): warnings.warn(_msg_cplx_sort, DeprecationWarning, stacklevel=2) return _cmplx_sort(p) def _cmplx_sort(p): """Sort roots based on magnitude. Parameters ---------- p : array_like The roots to sort, as a 1-D array. Returns ------- p_sorted : ndarray Sorted roots. 
indx : ndarray Array of indices needed to sort the input `p`. Examples -------- >>> from scipy import signal >>> vals = [1, 4, 1+1.j, 3] >>> p_sorted, indx = signal.cmplx_sort(vals) >>> p_sorted array([1.+0.j, 1.+1.j, 3.+0.j, 4.+0.j]) >>> indx array([0, 2, 3, 1]) """ p = np.asarray(p) indx = np.argsort(abs(p)) return np.take(p, indx, 0), indx def unique_roots(p, tol=1e-3, rtype='min'): """Determine unique roots and their multiplicities from a list of roots. Parameters ---------- p : array_like The list of roots. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. Refer to Notes about the details on roots grouping. rtype : {'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}, optional How to determine the returned root if multiple roots are within `tol` of each other. - 'max', 'maximum': pick the maximum of those roots - 'min', 'minimum': pick the minimum of those roots - 'avg', 'mean': take the average of those roots When finding minimum or maximum among complex roots they are compared first by the real part and then by the imaginary part. Returns ------- unique : ndarray The list of unique roots. multiplicity : ndarray The multiplicity of each root. Notes ----- If we have 3 roots ``a``, ``b`` and ``c``, such that ``a`` is close to ``b`` and ``b`` is close to ``c`` (distance is less than `tol`), then it doesn't necessarily mean that ``a`` is close to ``c``. It means that roots grouping is not unique. In this function we use "greedy" grouping going through the roots in the order they are given in the input `p`. This utility function is not specific to roots but can be used for any sequence of values for which uniqueness and multiplicity has to be determined. For a more general routine, see `numpy.unique`. Examples -------- >>> from scipy import signal >>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3] >>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg') Check which roots have multiplicity larger than 1: >>> uniq[mult > 1] array([ 1.305]) """ if rtype in ['max', 'maximum']: reduce = np.max elif rtype in ['min', 'minimum']: reduce = np.min elif rtype in ['avg', 'mean']: reduce = np.mean else: raise ValueError("`rtype` must be one of " "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") p = np.asarray(p) points = np.empty((len(p), 2)) points[:, 0] = np.real(p) points[:, 1] = np.imag(p) tree = cKDTree(points) p_unique = [] p_multiplicity = [] used = np.zeros(len(p), dtype=bool) for i in range(len(p)): if used[i]: continue group = tree.query_ball_point(points[i], tol) group = [x for x in group if not used[x]] p_unique.append(reduce(p[group])) p_multiplicity.append(len(group)) used[group] = True return np.asarray(p_unique), np.asarray(p_multiplicity) def invres(r, p, k, tol=1e-3, rtype='avg'): """Compute b(s) and a(s) from partial fraction expansion. If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] H(s) = ------ = ------------------------------------------ a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] then the partial-fraction expansion H(s) is defined as:: r[0] r[1] r[-1] = -------- + -------- + ... + --------- + k(s) (s-p[0]) (s-p[1]) (s-p[-1]) If there are any repeated roots (closer together than `tol`), then H(s) has terms like:: r[i] r[i+1] r[i+n-1] -------- + ----------- + ... 
+ ----------- (s-p[i]) (s-p[i])**2 (s-p[i])**n This function is used for polynomials in positive powers of s or z, such as analog filters or digital filters in controls engineering. For negative powers of z (typical for digital filters in DSP), use `invresz`. Parameters ---------- r : array_like Residues corresponding to the poles. For repeated poles, the residues must be ordered to correspond to ascending by power fractions. p : array_like Poles. Equal poles must be adjacent. k : array_like Coefficients of the direct polynomial term. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. See Also -------- residue, invresz, unique_roots """ r = np.atleast_1d(r) p = np.atleast_1d(p) k = np.trim_zeros(np.atleast_1d(k), 'f') unique_poles, multiplicity = _group_poles(p, tol, rtype) factors, denominator = _compute_factors(unique_poles, multiplicity, include_powers=True) if len(k) == 0: numerator = 0 else: numerator = np.polymul(k, denominator) for residue, factor in zip(r, factors): numerator = np.polyadd(numerator, residue * factor) return numerator, denominator def _compute_factors(roots, multiplicity, include_powers=False): """Compute the total polynomial divided by factors for each root.""" current = np.array([1]) suffixes = [current] for pole, mult in zip(roots[-1:0:-1], multiplicity[-1:0:-1]): monomial = np.array([1, -pole]) for _ in range(mult): current = np.polymul(current, monomial) suffixes.append(current) suffixes = suffixes[::-1] factors = [] current = np.array([1]) for pole, mult, suffix in zip(roots, multiplicity, suffixes): monomial = np.array([1, -pole]) block = [] for i in range(mult): if i == 0 or include_powers: block.append(np.polymul(current, suffix)) current = np.polymul(current, monomial) factors.extend(reversed(block)) return factors, current def _compute_residues(poles, multiplicity, numerator): denominator_factors, _ = _compute_factors(poles, multiplicity) numerator = numerator.astype(poles.dtype) residues = [] for pole, mult, factor in zip(poles, multiplicity, denominator_factors): if mult == 1: residues.append(np.polyval(numerator, pole) / np.polyval(factor, pole)) else: numer = numerator.copy() monomial = np.array([1, -pole]) factor, d = np.polydiv(factor, monomial) block = [] for _ in range(mult): numer, n = np.polydiv(numer, monomial) r = n[0] / d[0] numer = np.polysub(numer, r * factor) block.append(r) residues.extend(reversed(block)) return np.asarray(residues) def residue(b, a, tol=1e-3, rtype='avg'): """Compute partial-fraction expansion of b(s) / a(s). If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(s) b[0] s**(M) + b[1] s**(M-1) + ... + b[M] H(s) = ------ = ------------------------------------------ a(s) a[0] s**(N) + a[1] s**(N-1) + ... + a[N] then the partial-fraction expansion H(s) is defined as:: r[0] r[1] r[-1] = -------- + -------- + ... + --------- + k(s) (s-p[0]) (s-p[1]) (s-p[-1]) If there are any repeated roots (closer together than `tol`), then H(s) has terms like:: r[i] r[i+1] r[i+n-1] -------- + ----------- + ... 
+ ----------- (s-p[i]) (s-p[i])**2 (s-p[i])**n This function is used for polynomials in positive powers of s or z, such as analog filters or digital filters in controls engineering. For negative powers of z (typical for digital filters in DSP), use `residuez`. See Notes for details about the algorithm. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- r : ndarray Residues corresponding to the poles. For repeated poles, the residues are ordered to correspond to ascending by power fractions. p : ndarray Poles ordered by magnitude in ascending order. k : ndarray Coefficients of the direct polynomial term. See Also -------- invres, residuez, numpy.poly, unique_roots Notes ----- The "deflation through subtraction" algorithm is used for computations --- method 6 in [1]_. The form of partial fraction expansion depends on poles multiplicity in the exact mathematical sense. However there is no way to exactly determine multiplicity of roots of a polynomial in numerical computing. Thus you should think of the result of `residue` with given `tol` as partial fraction expansion computed for the denominator composed of the computed poles with empirically determined multiplicity. The choice of `tol` can drastically change the result if there are close poles. References ---------- .. [1] J. F. Mahoney, B. D. Sivazlian, "Partial fractions expansion: a review of computational methodology and efficiency", Journal of Computational and Applied Mathematics, Vol. 9, 1983. """ b = np.asarray(b) a = np.asarray(a) if (np.issubdtype(b.dtype, np.complexfloating) or np.issubdtype(a.dtype, np.complexfloating)): b = b.astype(complex) a = a.astype(complex) else: b = b.astype(float) a = a.astype(float) b = np.trim_zeros(np.atleast_1d(b), 'f') a = np.trim_zeros(np.atleast_1d(a), 'f') if a.size == 0: raise ValueError("Denominator `a` is zero.") poles = np.roots(a) if b.size == 0: return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) if len(b) < len(a): k = np.empty(0) else: k, b = np.polydiv(b, a) unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) unique_poles, order = _cmplx_sort(unique_poles) multiplicity = multiplicity[order] residues = _compute_residues(unique_poles, multiplicity, b) index = 0 for pole, mult in zip(unique_poles, multiplicity): poles[index:index + mult] = pole index += mult return residues / a[0], poles, k def residuez(b, a, tol=1e-3, rtype='avg'): """Compute partial-fraction expansion of b(z) / a(z). If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) H(z) = ------ = ------------------------------------------ a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) then the partial-fraction expansion H(z) is defined as:: r[0] r[-1] = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... (1-p[0]z**(-1)) (1-p[-1]z**(-1)) If there are any repeated roots (closer than `tol`), then the partial fraction expansion has terms like:: r[i] r[i+1] r[i+n-1] -------------- + ------------------ + ... 
+ ------------------ (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n This function is used for polynomials in negative powers of z, such as digital filters in DSP. For positive powers, use `residue`. See Notes of `residue` for details about the algorithm. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- r : ndarray Residues corresponding to the poles. For repeated poles, the residues are ordered to correspond to ascending by power fractions. p : ndarray Poles ordered by magnitude in ascending order. k : ndarray Coefficients of the direct polynomial term. See Also -------- invresz, residue, unique_roots """ b = np.asarray(b) a = np.asarray(a) if (np.issubdtype(b.dtype, np.complexfloating) or np.issubdtype(a.dtype, np.complexfloating)): b = b.astype(complex) a = a.astype(complex) else: b = b.astype(float) a = a.astype(float) b = np.trim_zeros(np.atleast_1d(b), 'b') a = np.trim_zeros(np.atleast_1d(a), 'b') if a.size == 0: raise ValueError("Denominator `a` is zero.") elif a[0] == 0: raise ValueError("First coefficient of determinant `a` must be " "non-zero.") poles = np.roots(a) if b.size == 0: return np.zeros(poles.shape), _cmplx_sort(poles)[0], np.array([]) b_rev = b[::-1] a_rev = a[::-1] if len(b_rev) < len(a_rev): k_rev = np.empty(0) else: k_rev, b_rev = np.polydiv(b_rev, a_rev) unique_poles, multiplicity = unique_roots(poles, tol=tol, rtype=rtype) unique_poles, order = _cmplx_sort(unique_poles) multiplicity = multiplicity[order] residues = _compute_residues(1 / unique_poles, multiplicity, b_rev) index = 0 powers = np.empty(len(residues), dtype=int) for pole, mult in zip(unique_poles, multiplicity): poles[index:index + mult] = pole powers[index:index + mult] = 1 + np.arange(mult) index += mult residues *= (-poles) ** powers / a_rev[0] return residues, poles, k_rev[::-1] def _group_poles(poles, tol, rtype): if rtype in ['max', 'maximum']: reduce = np.max elif rtype in ['min', 'minimum']: reduce = np.min elif rtype in ['avg', 'mean']: reduce = np.mean else: raise ValueError("`rtype` must be one of " "{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}") unique = [] multiplicity = [] pole = poles[0] block = [pole] for i in range(1, len(poles)): if abs(poles[i] - pole) <= tol: block.append(pole) else: unique.append(reduce(block)) multiplicity.append(len(block)) pole = poles[i] block = [pole] unique.append(reduce(block)) multiplicity.append(len(block)) return np.asarray(unique), np.asarray(multiplicity) def invresz(r, p, k, tol=1e-3, rtype='avg'): """Compute b(z) and a(z) from partial fraction expansion. If `M` is the degree of numerator `b` and `N` the degree of denominator `a`:: b(z) b[0] + b[1] z**(-1) + ... + b[M] z**(-M) H(z) = ------ = ------------------------------------------ a(z) a[0] + a[1] z**(-1) + ... + a[N] z**(-N) then the partial-fraction expansion H(z) is defined as:: r[0] r[-1] = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ... (1-p[0]z**(-1)) (1-p[-1]z**(-1)) If there are any repeated roots (closer than `tol`), then the partial fraction expansion has terms like:: r[i] r[i+1] r[i+n-1] -------------- + ------------------ + ... 
+ ------------------ (1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n This function is used for polynomials in negative powers of z, such as digital filters in DSP. For positive powers, use `invres`. Parameters ---------- r : array_like Residues corresponding to the poles. For repeated poles, the residues must be ordered to correspond to ascending by power fractions. p : array_like Poles. Equal poles must be adjacent. k : array_like Coefficients of the direct polynomial term. tol : float, optional The tolerance for two roots to be considered equal in terms of the distance between them. Default is 1e-3. See `unique_roots` for further details. rtype : {'avg', 'min', 'max'}, optional Method for computing a root to represent a group of identical roots. Default is 'avg'. See `unique_roots` for further details. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. See Also -------- residuez, unique_roots, invres """ r = np.atleast_1d(r) p = np.atleast_1d(p) k = np.trim_zeros(np.atleast_1d(k), 'b') unique_poles, multiplicity = _group_poles(p, tol, rtype) factors, denominator = _compute_factors(unique_poles, multiplicity, include_powers=True) if len(k) == 0: numerator = 0 else: numerator = np.polymul(k[::-1], denominator[::-1]) for residue, factor in zip(r, factors): numerator = np.polyadd(numerator, residue * factor[::-1]) return numerator[::-1], denominator def resample(x, num, t=None, axis=0, window=None, domain='time'): """ Resample `x` to `num` samples using Fourier method along the given axis. The resampled signal starts at the same value as `x` but is sampled with a spacing of ``len(x) / num * (spacing of x)``. Because a Fourier method is used, the signal is assumed to be periodic. Parameters ---------- x : array_like The data to be resampled. num : int The number of samples in the resampled signal. t : array_like, optional If `t` is given, it is assumed to be the equally spaced sample positions associated with the signal data in `x`. axis : int, optional The axis of `x` that is resampled. Default is 0. window : array_like, callable, string, float, or tuple, optional Specifies the window applied to the signal in the Fourier domain. See below for details. domain : string, optional A string indicating the domain of the input `x`: ``time`` Consider the input `x` as time-domain (Default), ``freq`` Consider the input `x` as frequency-domain. Returns ------- resampled_x or (resampled_x, resampled_t) Either the resampled array, or, if `t` was given, a tuple containing the resampled array and the corresponding resampled positions. See Also -------- decimate : Downsample the signal after applying an FIR or IIR filter. resample_poly : Resample using polyphase filtering and an FIR filter. Notes ----- The argument `window` controls a Fourier-domain window that tapers the Fourier spectrum before zero-padding to alleviate ringing in the resampled values for sampled signals you didn't intend to be interpreted as band-limited. If `window` is a function, then it is called with a vector of inputs indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). If `window` is an array of the same length as `x.shape[axis]` it is assumed to be the window to be applied directly in the Fourier domain (with dc and low-frequency first). For any other type of `window`, the function `scipy.signal.get_window` is called to generate the window. The first sample of the returned vector is the same as the first sample of the input vector. 
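A minimal sketch of that property, using an exactly band-limited (periodic) input so that the Fourier method introduces no ringing:

>>> import numpy as np
>>> from scipy import signal
>>> x = np.cos(2 * np.pi * 3 * np.arange(16) / 16)
>>> y = signal.resample(x, 24)
>>> y.shape
(24,)
>>> np.allclose(y[0], x[0])
True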
The spacing between samples is changed from ``dx`` to ``dx * len(x) / num``. If `t` is not None, then it is used solely to calculate the resampled positions `resampled_t` As noted, `resample` uses FFT transformations, which can be very slow if the number of input or output samples is large and prime; see `scipy.fft.fft`. Examples -------- Note that the end of the resampled data rises to meet the first sample of the next cycle: >>> import numpy as np >>> from scipy import signal >>> x = np.linspace(0, 10, 20, endpoint=False) >>> y = np.cos(-x**2/6.0) >>> f = signal.resample(y, 100) >>> xnew = np.linspace(0, 10, 100, endpoint=False) >>> import matplotlib.pyplot as plt >>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro') >>> plt.legend(['data', 'resampled'], loc='best') >>> plt.show() """ if domain not in ('time', 'freq'): raise ValueError("Acceptable domain flags are 'time' or" " 'freq', not domain={}".format(domain)) x = np.asarray(x) Nx = x.shape[axis] # Check if we can use faster real FFT real_input = np.isrealobj(x) if domain == 'time': # Forward transform if real_input: X = sp_fft.rfft(x, axis=axis) else: # Full complex FFT X = sp_fft.fft(x, axis=axis) else: # domain == 'freq' X = x # Apply window to spectrum if window is not None: if callable(window): W = window(sp_fft.fftfreq(Nx)) elif isinstance(window, np.ndarray): if window.shape != (Nx,): raise ValueError('window must have the same length as data') W = window else: W = sp_fft.ifftshift(get_window(window, Nx)) newshape_W = [1] * x.ndim newshape_W[axis] = X.shape[axis] if real_input: # Fold the window back on itself to mimic complex behavior W_real = W.copy() W_real[1:] += W_real[-1:0:-1] W_real[1:] *= 0.5 X *= W_real[:newshape_W[axis]].reshape(newshape_W) else: X *= W.reshape(newshape_W) # Copy each half of the original spectrum to the output spectrum, either # truncating high frequences (downsampling) or zero-padding them # (upsampling) # Placeholder array for output spectrum newshape = list(x.shape) if real_input: newshape[axis] = num // 2 + 1 else: newshape[axis] = num Y = np.zeros(newshape, X.dtype) # Copy positive frequency components (and Nyquist, if present) N = min(num, Nx) nyq = N // 2 + 1 # Slice index that includes Nyquist if present sl = [slice(None)] * x.ndim sl[axis] = slice(0, nyq) Y[tuple(sl)] = X[tuple(sl)] if not real_input: # Copy negative frequency components if N > 2: # (slice expression doesn't collapse to empty array) sl[axis] = slice(nyq - N, None) Y[tuple(sl)] = X[tuple(sl)] # Split/join Nyquist component(s) if present # So far we have set Y[+N/2]=X[+N/2] if N % 2 == 0: if num < Nx: # downsampling if real_input: sl[axis] = slice(N//2, N//2 + 1) Y[tuple(sl)] *= 2. 
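# For real input the new Nyquist bin must carry the energy of both the
# +N/2 and -N/2 components of the original spectrum. Doubling it here is
# equivalent to adding the conjugate -N/2 term, since irfft effectively
# uses only the real part of this bin; this mirrors the complex branch below.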
else: # select the component of Y at frequency +N/2, # add the component of X at -N/2 sl[axis] = slice(-N//2, -N//2 + 1) Y[tuple(sl)] += X[tuple(sl)] elif Nx < num: # upsampling # select the component at frequency +N/2 and halve it sl[axis] = slice(N//2, N//2 + 1) Y[tuple(sl)] *= 0.5 if not real_input: temp = Y[tuple(sl)] # set the component at -N/2 equal to the component at +N/2 sl[axis] = slice(num-N//2, num-N//2 + 1) Y[tuple(sl)] = temp # Inverse transform if real_input: y = sp_fft.irfft(Y, num, axis=axis) else: y = sp_fft.ifft(Y, axis=axis, overwrite_x=True) y *= (float(num) / float(Nx)) if t is None: return y else: new_t = np.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] return y, new_t def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0), padtype='constant', cval=None): """ Resample `x` along the given axis using polyphase filtering. The signal `x` is upsampled by the factor `up`, a zero-phase low-pass FIR filter is applied, and then it is downsampled by the factor `down`. The resulting sample rate is ``up / down`` times the original sample rate. By default, values beyond the boundary of the signal are assumed to be zero during the filtering step. Parameters ---------- x : array_like The data to be resampled. up : int The upsampling factor. down : int The downsampling factor. axis : int, optional The axis of `x` that is resampled. Default is 0. window : string, tuple, or array_like, optional Desired window to use to design the low-pass filter, or the FIR filter coefficients to employ. See below for details. padtype : string, optional `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of the other signal extension modes supported by `scipy.signal.upfirdn`. Changes assumptions on values beyond the boundary. If `constant`, assumed to be `cval` (default zero). If `line` assumed to continue a linear trend defined by the first and last points. `mean`, `median`, `maximum` and `minimum` work as in `np.pad` and assume that the values beyond the boundary are the mean, median, maximum or minimum respectively of the array along the axis. .. versionadded:: 1.4.0 cval : float, optional Value to use if `padtype='constant'`. Default is zero. .. versionadded:: 1.4.0 Returns ------- resampled_x : array The resampled array. See Also -------- decimate : Downsample the signal after applying an FIR or IIR filter. resample : Resample up or down using the FFT method. Notes ----- This polyphase method will likely be faster than the Fourier method in `scipy.signal.resample` when the number of samples is large and prime, or when the number of samples is large and `up` and `down` share a large greatest common denominator. The length of the FIR filter used will depend on ``max(up, down) // gcd(up, down)``, and the number of operations during polyphase filtering will depend on the filter length and `down` (see `scipy.signal.upfirdn` for details). The argument `window` specifies the FIR low-pass filter design. If `window` is an array_like it is assumed to be the FIR filter coefficients. Note that the FIR filter is applied after the upsampling step, so it should be designed to operate on a signal at a sampling frequency higher than the original by a factor of `up//gcd(up, down)`. This function's output will be centered with respect to this array, so it is best to pass a symmetric filter with an odd number of samples if, as is usually the case, a zero-phase filter is desired. 
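For instance, a symmetric, odd-length FIR filter designed for the upsampled rate can be passed directly (a minimal sketch; the length and cutoff below are arbitrary choices, not recommendations):

>>> import numpy as np
>>> from scipy import signal
>>> up, down = 3, 2
>>> h = signal.firwin(31, 1. / max(up, down))  # odd length, linear phase
>>> x = np.cos(2 * np.pi * 0.05 * np.arange(50))
>>> y = signal.resample_poly(x, up, down, window=h)
>>> y.shape
(75,)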
For any other type of `window`, the functions `scipy.signal.get_window` and `scipy.signal.firwin` are called to generate the appropriate filter coefficients. The first sample of the returned vector is the same as the first sample of the input vector. The spacing between samples is changed from ``dx`` to ``dx * down / float(up)``. Examples -------- By default, the end of the resampled data rises to meet the first sample of the next cycle for the FFT method, and gets closer to zero for the polyphase method: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> x = np.linspace(0, 10, 20, endpoint=False) >>> y = np.cos(-x**2/6.0) >>> f_fft = signal.resample(y, 100) >>> f_poly = signal.resample_poly(y, 100, 20) >>> xnew = np.linspace(0, 10, 100, endpoint=False) >>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-') >>> plt.plot(x, y, 'ko-') >>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') >>> plt.show() This default behaviour can be changed by using the padtype option: >>> N = 5 >>> x = np.linspace(0, 1, N, endpoint=False) >>> y = 2 + x**2 - 1.7*np.sin(x) + .2*np.cos(11*x) >>> y2 = 1 + x**3 + 0.1*np.sin(x) + .1*np.cos(11*x) >>> Y = np.stack([y, y2], axis=-1) >>> up = 4 >>> xr = np.linspace(0, 1, N*up, endpoint=False) >>> y2 = signal.resample_poly(Y, up, 1, padtype='constant') >>> y3 = signal.resample_poly(Y, up, 1, padtype='mean') >>> y4 = signal.resample_poly(Y, up, 1, padtype='line') >>> for i in [0,1]: ... plt.figure() ... plt.plot(xr, y4[:,i], 'g.', label='line') ... plt.plot(xr, y3[:,i], 'y.', label='mean') ... plt.plot(xr, y2[:,i], 'r.', label='constant') ... plt.plot(x, Y[:,i], 'k-') ... plt.legend() >>> plt.show() """ x = np.asarray(x) if up != int(up): raise ValueError("up must be an integer") if down != int(down): raise ValueError("down must be an integer") up = int(up) down = int(down) if up < 1 or down < 1: raise ValueError('up and down must be >= 1') if cval is not None and padtype != 'constant': raise ValueError('cval has no effect when padtype is ', padtype) # Determine our up and down factors # Use a rational approximation to save computation time on really long # signals g_ = math.gcd(up, down) up //= g_ down //= g_ if up == down == 1: return x.copy() n_in = x.shape[axis] n_out = n_in * up n_out = n_out // down + bool(n_out % down) if isinstance(window, (list, np.ndarray)): window = np.array(window) # use array to force a copy (we modify it) if window.ndim > 1: raise ValueError('window must be 1-D') half_len = (window.size - 1) // 2 h = window else: # Design a linear-phase low-pass FIR filter max_rate = max(up, down) f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist) half_len = 10 * max_rate # reasonable cutoff for sinc-like function h = firwin(2 * half_len + 1, f_c, window=window).astype(x.dtype) # match dtype of x h *= up # Zero-pad our filter to put the output samples at the center n_pre_pad = (down - half_len % down) n_post_pad = 0 n_pre_remove = (half_len + n_pre_pad) // down # We should rarely need to do this given our filter lengths... 
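# The loop below lengthens the trailing zero padding until upfirdn is
# guaranteed to yield at least n_out + n_pre_remove samples, so that n_out
# samples remain after the leading group-delay samples are discarded.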
while _output_len(len(h) + n_pre_pad + n_post_pad, n_in, up, down) < n_out + n_pre_remove: n_post_pad += 1 h = np.concatenate((np.zeros(n_pre_pad, dtype=h.dtype), h, np.zeros(n_post_pad, dtype=h.dtype))) n_pre_remove_end = n_pre_remove + n_out # Remove background depending on the padtype option funcs = {'mean': np.mean, 'median': np.median, 'minimum': np.amin, 'maximum': np.amax} upfirdn_kwargs = {'mode': 'constant', 'cval': 0} if padtype in funcs: background_values = funcs[padtype](x, axis=axis, keepdims=True) elif padtype in _upfirdn_modes: upfirdn_kwargs = {'mode': padtype} if padtype == 'constant': if cval is None: cval = 0 upfirdn_kwargs['cval'] = cval else: raise ValueError( 'padtype must be one of: maximum, mean, median, minimum, ' + ', '.join(_upfirdn_modes)) if padtype in funcs: x = x - background_values # filter then remove excess y = upfirdn(h, x, up, down, axis=axis, **upfirdn_kwargs) keep = [slice(None), ]*x.ndim keep[axis] = slice(n_pre_remove, n_pre_remove_end) y_keep = y[tuple(keep)] # Add background back if padtype in funcs: y_keep += background_values return y_keep def vectorstrength(events, period): ''' Determine the vector strength of the events corresponding to the given period. The vector strength is a measure of phase synchrony, how well the timing of the events is synchronized to a single period of a periodic signal. If multiple periods are used, calculate the vector strength of each. This is called the "resonating vector strength". Parameters ---------- events : 1D array_like An array of time points containing the timing of the events. period : float or array_like The period of the signal that the events should synchronize to. The period is in the same units as `events`. It can also be an array of periods, in which case the outputs are arrays of the same length. Returns ------- strength : float or 1D array The strength of the synchronization. 1.0 is perfect synchronization and 0.0 is no synchronization. If `period` is an array, this is also an array with each element containing the vector strength at the corresponding period. phase : float or array The phase that the events are most strongly synchronized to in radians. If `period` is an array, this is also an array with each element containing the phase for the corresponding period. References ---------- van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector strength: Auditory system, electric fish, and noise. Chaos 21, 047508 (2011); :doi:`10.1063/1.3670512`. van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises: biological and mathematical perspectives. Biol Cybern. 2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`. van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens when we vary the "probing" frequency while keeping the spike times fixed. Biol Cybern. 2013 Aug;107(4):491-94. :doi:`10.1007/s00422-013-0560-8`. 
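Examples
--------
A minimal sketch: events locked exactly to a 1 second period give a vector strength near 1, while events spread uniformly over the period give a value near 0 (the event times below are arbitrary illustrations).

>>> import numpy as np
>>> from scipy import signal
>>> locked = np.arange(10) * 1.0   # one event at the same phase each period
>>> strength, phase = signal.vectorstrength(locked, 1.0)
>>> float(np.round(strength, 3))
1.0
>>> spread = np.arange(10) * 0.1   # events spread across one period
>>> strength2, _ = signal.vectorstrength(spread, 1.0)
>>> bool(strength2 < 0.2)
True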
''' events = np.asarray(events) period = np.asarray(period) if events.ndim > 1: raise ValueError('events cannot have dimensions more than 1') if period.ndim > 1: raise ValueError('period cannot have dimensions more than 1') # we need to know later if period was originally a scalar scalarperiod = not period.ndim events = np.atleast_2d(events) period = np.atleast_2d(period) if (period <= 0).any(): raise ValueError('periods must be positive') # this converts the times to vectors vectors = np.exp(np.dot(2j*np.pi/period.T, events)) # the vector strength is just the magnitude of the mean of the vectors # the vector phase is the angle of the mean of the vectors vectormean = np.mean(vectors, axis=1) strength = abs(vectormean) phase = np.angle(vectormean) # if the original period was a scalar, return scalars if scalarperiod: strength = strength[0] phase = phase[0] return strength, phase def detrend(data, axis=-1, type='linear', bp=0, overwrite_data=False): """ Remove linear trend along axis from data. Parameters ---------- data : array_like The input data. axis : int, optional The axis along which to detrend the data. By default this is the last axis (-1). type : {'linear', 'constant'}, optional The type of detrending. If ``type == 'linear'`` (default), the result of a linear least-squares fit to `data` is subtracted from `data`. If ``type == 'constant'``, only the mean of `data` is subtracted. bp : array_like of ints, optional A sequence of break points. If given, an individual linear fit is performed for each part of `data` between two break points. Break points are specified as indices into `data`. This parameter only has an effect when ``type == 'linear'``. overwrite_data : bool, optional If True, perform in place detrending and avoid a copy. Default is False Returns ------- ret : ndarray The detrended input data. Examples -------- >>> import numpy as np >>> from scipy import signal >>> rng = np.random.default_rng() >>> npoints = 1000 >>> noise = rng.standard_normal(npoints) >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise >>> (signal.detrend(x) - noise).max() 0.06 # random """ if type not in ['linear', 'l', 'constant', 'c']: raise ValueError("Trend type must be 'linear' or 'constant'.") data = np.asarray(data) dtype = data.dtype.char if dtype not in 'dfDF': dtype = 'd' if type in ['constant', 'c']: ret = data - np.mean(data, axis, keepdims=True) return ret else: dshape = data.shape N = dshape[axis] bp = np.sort(np.unique(np.concatenate(np.atleast_1d(0, bp, N)))) if np.any(bp > N): raise ValueError("Breakpoints must be less than length " "of data along given axis.") # Restructure data so that axis is along first dimension and # all other dimensions are collapsed into second dimension rnk = len(dshape) if axis < 0: axis = axis + rnk newdata = np.moveaxis(data, axis, 0) newdata_shape = newdata.shape newdata = newdata.reshape(N, -1) if not overwrite_data: newdata = newdata.copy() # make sure we have a copy if newdata.dtype.char not in 'dfDF': newdata = newdata.astype(dtype) # Nreg = len(bp) - 1 # Find leastsq fit and remove it for each piece for m in range(len(bp) - 1): Npts = bp[m + 1] - bp[m] A = np.ones((Npts, 2), dtype) A[:, 0] = np.arange(1, Npts + 1, dtype=dtype) / Npts sl = slice(bp[m], bp[m + 1]) coef, resids, rank, s = linalg.lstsq(A, newdata[sl]) newdata[sl] = newdata[sl] - A @ coef # Put data back in original shape. 
newdata = newdata.reshape(newdata_shape) ret = np.moveaxis(newdata, 0, axis) return ret def lfilter_zi(b, a): """ Construct initial conditions for lfilter for step response steady-state. Compute an initial state `zi` for the `lfilter` function that corresponds to the steady state of the step response. A typical use of this function is to set the initial state so that the output of the filter starts at the same value as the first element of the signal to be filtered. Parameters ---------- b, a : array_like (1-D) The IIR filter coefficients. See `lfilter` for more information. Returns ------- zi : 1-D ndarray The initial state for the filter. See Also -------- lfilter, lfiltic, filtfilt Notes ----- A linear filter with order m has a state space representation (A, B, C, D), for which the output y of the filter can be expressed as:: z(n+1) = A*z(n) + B*x(n) y(n) = C*z(n) + D*x(n) where z(n) is a vector of length m, A has shape (m, m), B has shape (m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is a scalar). lfilter_zi solves:: zi = A*zi + B In other words, it finds the initial condition for which the response to an input of all ones is a constant. Given the filter coefficients `a` and `b`, the state space matrices for the transposed direct form II implementation of the linear filter, which is the implementation used by scipy.signal.lfilter, are:: A = scipy.linalg.companion(a).T B = b[1:] - a[1:]*b[0] assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first divided by a[0]. Examples -------- The following code creates a lowpass Butterworth filter. Then it applies that filter to an array whose values are all 1.0; the output is also all 1.0, as expected for a lowpass filter. If the `zi` argument of `lfilter` had not been given, the output would have shown the transient signal. >>> from numpy import array, ones >>> from scipy.signal import lfilter, lfilter_zi, butter >>> b, a = butter(5, 0.25) >>> zi = lfilter_zi(b, a) >>> y, zo = lfilter(b, a, ones(10), zi=zi) >>> y array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) Another example: >>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]) >>> y, zf = lfilter(b, a, x, zi=zi*x[0]) >>> y array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528, 0.44399389, 0.35505241]) Note that the `zi` argument to `lfilter` was computed using `lfilter_zi` and scaled by `x[0]`. Then the output `y` has no transient until the input drops from 0.5 to 0.0. """ # FIXME: Can this function be replaced with an appropriate # use of lfiltic? For example, when b,a = butter(N,Wn), # lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)). # # We could use scipy.signal.normalize, but it uses warnings in # cases where a ValueError is more appropriate, and it allows # b to be 2D. b = np.atleast_1d(b) if b.ndim != 1: raise ValueError("Numerator b must be 1-D.") a = np.atleast_1d(a) if a.ndim != 1: raise ValueError("Denominator a must be 1-D.") while len(a) > 1 and a[0] == 0.0: a = a[1:] if a.size < 1: raise ValueError("There must be at least one nonzero `a` coefficient.") if a[0] != 1.0: # Normalize the coefficients so a[0] == 1. b = b / a[0] a = a / a[0] n = max(len(a), len(b)) # Pad a or b with zeros so they are the same length. 
if len(a) < n: a = np.r_[a, np.zeros(n - len(a), dtype=a.dtype)] elif len(b) < n: b = np.r_[b, np.zeros(n - len(b), dtype=b.dtype)] IminusA = np.eye(n - 1, dtype=np.result_type(a, b)) - linalg.companion(a).T B = b[1:] - a[1:] * b[0] # Solve zi = A*zi + B zi = np.linalg.solve(IminusA, B) # For future reference: we could also use the following # explicit formulas to solve the linear system: # # zi = np.zeros(n - 1) # zi[0] = B.sum() / IminusA[:,0].sum() # asum = 1.0 # csum = 0.0 # for k in range(1,n-1): # asum += a[k] # csum += b[k] - a[k]*b[0] # zi[k] = asum*zi[0] - csum return zi def sosfilt_zi(sos): """ Construct initial conditions for sosfilt for step response steady-state. Compute an initial state `zi` for the `sosfilt` function that corresponds to the steady state of the step response. A typical use of this function is to set the initial state so that the output of the filter starts at the same value as the first element of the signal to be filtered. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. Returns ------- zi : ndarray Initial conditions suitable for use with ``sosfilt``, shape ``(n_sections, 2)``. See Also -------- sosfilt, zpk2sos Notes ----- .. versionadded:: 0.16.0 Examples -------- Filter a rectangular pulse that begins at time 0, with and without the use of the `zi` argument of `scipy.signal.sosfilt`. >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> sos = signal.butter(9, 0.125, output='sos') >>> zi = signal.sosfilt_zi(sos) >>> x = (np.arange(250) < 100).astype(int) >>> f1 = signal.sosfilt(sos, x) >>> f2, zo = signal.sosfilt(sos, x, zi=zi) >>> plt.plot(x, 'k--', label='x') >>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered') >>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi') >>> plt.legend(loc='best') >>> plt.show() """ sos = np.asarray(sos) if sos.ndim != 2 or sos.shape[1] != 6: raise ValueError('sos must be shape (n_sections, 6)') if sos.dtype.kind in 'bui': sos = sos.astype(np.float64) n_sections = sos.shape[0] zi = np.empty((n_sections, 2), dtype=sos.dtype) scale = 1.0 for section in range(n_sections): b = sos[section, :3] a = sos[section, 3:] zi[section] = scale * lfilter_zi(b, a) # If H(z) = B(z)/A(z) is this section's transfer function, then # b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady # state value of this section's step response. scale *= b.sum() / a.sum() return zi def _filtfilt_gust(b, a, x, axis=-1, irlen=None): """Forward-backward IIR filter that uses Gustafsson's method. Apply the IIR filter defined by `(b,a)` to `x` twice, first forward then backward, using Gustafsson's initial conditions [1]_. Let ``y_fb`` be the result of filtering first forward and then backward, and let ``y_bf`` be the result of filtering first backward then forward. Gustafsson's method is to compute initial conditions for the forward pass and the backward pass such that ``y_fb == y_bf``. Parameters ---------- b : scalar or 1-D ndarray Numerator coefficients of the filter. a : scalar or 1-D ndarray Denominator coefficients of the filter. x : ndarray Data to be filtered. axis : int, optional Axis of `x` to be filtered. Default is -1. irlen : int or None, optional The length of the nonnegligible part of the impulse response. If `irlen` is None, or if the length of the signal is less than ``2 * irlen``, then no part of the impulse response is ignored. 
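A minimal sketch of calling this helper directly, mainly to show the return signature; it is private, so the import path below assumes this module's location and may change:

>>> import numpy as np
>>> from scipy.signal import butter
>>> from scipy.signal._signaltools import _filtfilt_gust
>>> b, a = butter(2, 0.3)
>>> x = np.sin(np.linspace(0, 5, 40))
>>> y, x0, x1 = _filtfilt_gust(b, a, x)
>>> y.shape, x0.shape, x1.shape
((40,), (2,), (2,))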
Returns ------- y : ndarray The filtered data. x0 : ndarray Initial condition for the forward filter. x1 : ndarray Initial condition for the backward filter. Notes ----- Typically the return values `x0` and `x1` are not needed by the caller. The intended use of these return values is in unit tests. References ---------- .. [1] F. Gustaffson. Determining the initial states in forward-backward filtering. Transactions on Signal Processing, 46(4):988-992, 1996. """ # In the comments, "Gustafsson's paper" and [1] refer to the # paper referenced in the docstring. b = np.atleast_1d(b) a = np.atleast_1d(a) order = max(len(b), len(a)) - 1 if order == 0: # The filter is just scalar multiplication, with no state. scale = (b[0] / a[0])**2 y = scale * x return y, np.array([]), np.array([]) if axis != -1 or axis != x.ndim - 1: # Move the axis containing the data to the end. x = np.swapaxes(x, axis, x.ndim - 1) # n is the number of samples in the data to be filtered. n = x.shape[-1] if irlen is None or n <= 2*irlen: m = n else: m = irlen # Create Obs, the observability matrix (called O in the paper). # This matrix can be interpreted as the operator that propagates # an arbitrary initial state to the output, assuming the input is # zero. # In Gustafsson's paper, the forward and backward filters are not # necessarily the same, so he has both O_f and O_b. We use the same # filter in both directions, so we only need O. The same comment # applies to S below. Obs = np.zeros((m, order)) zi = np.zeros(order) zi[0] = 1 Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0] for k in range(1, order): Obs[k:, k] = Obs[:-k, 0] # Obsr is O^R (Gustafsson's notation for row-reversed O) Obsr = Obs[::-1] # Create S. S is the matrix that applies the filter to the reversed # propagated initial conditions. That is, # out = S.dot(zi) # is the same as # tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs. # out = lfilter(b, a, tmp[::-1]) # Reverse and filter. # Equations (5) & (6) of [1] S = lfilter(b, a, Obs[::-1], axis=0) # Sr is S^R (row-reversed S) Sr = S[::-1] # M is [(S^R - O), (O^R - S)] if m == n: M = np.hstack((Sr - Obs, Obsr - S)) else: # Matrix described in section IV of [1]. M = np.zeros((2*m, 2*order)) M[:m, :order] = Sr - Obs M[m:, order:] = Obsr - S # Naive forward-backward and backward-forward filters. # These have large transients because the filters use zero initial # conditions. y_f = lfilter(b, a, x) y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1] y_b = lfilter(b, a, x[..., ::-1])[..., ::-1] y_bf = lfilter(b, a, y_b) delta_y_bf_fb = y_bf - y_fb if m == n: delta = delta_y_bf_fb else: start_m = delta_y_bf_fb[..., :m] end_m = delta_y_bf_fb[..., -m:] delta = np.concatenate((start_m, end_m), axis=-1) # ic_opt holds the "optimal" initial conditions. # The following code computes the result shown in the formula # of the paper between equations (6) and (7). if delta.ndim == 1: ic_opt = linalg.lstsq(M, delta)[0] else: # Reshape delta so it can be used as an array of multiple # right-hand-sides in linalg.lstsq. delta2d = delta.reshape(-1, delta.shape[-1]).T ic_opt0 = linalg.lstsq(M, delta2d)[0].T ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],)) # Now compute the filtered signal using equation (7) of [1]. # First, form [S^R, O^R] and call it W. if m == n: W = np.hstack((Sr, Obsr)) else: W = np.zeros((2*m, 2*order)) W[:m, :order] = Sr W[m:, order:] = Obsr # Equation (7) of [1] says # Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt] # `wic` is (almost) the product on the right. 
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order), # so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T, # so wic has shape (..., m). wic = ic_opt.dot(W.T) # `wic` is "almost" the product of W and the optimal ICs in equation # (7)--if we're using a truncated impulse response (m < n), `wic` # contains only the adjustments required for the ends of the signal. # Here we form y_opt, taking this into account if necessary. y_opt = y_fb if m == n: y_opt += wic else: y_opt[..., :m] += wic[..., :m] y_opt[..., -m:] += wic[..., -m:] x0 = ic_opt[..., :order] x1 = ic_opt[..., -order:] if axis != -1 or axis != x.ndim - 1: # Restore the data axis to its original position. x0 = np.swapaxes(x0, axis, x.ndim - 1) x1 = np.swapaxes(x1, axis, x.ndim - 1) y_opt = np.swapaxes(y_opt, axis, x.ndim - 1) return y_opt, x0, x1 def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): """ Apply a digital filter forward and backward to a signal. This function applies a linear digital filter twice, once forward and once backwards. The combined filter has zero phase and a filter order twice that of the original. The function provides options for handling the edges of the signal. The function `sosfiltfilt` (and filter design using ``output='sos'``) should be preferred over `filtfilt` for most filtering tasks, as second-order sections have fewer numerical problems. Parameters ---------- b : (N,) array_like The numerator coefficient vector of the filter. a : (N,) array_like The denominator coefficient vector of the filter. If ``a[0]`` is not 1, then both `a` and `b` are normalized by ``a[0]``. x : array_like The array of data to be filtered. axis : int, optional The axis of `x` to which the filter is applied. Default is -1. padtype : str or None, optional Must be 'odd', 'even', 'constant', or None. This determines the type of extension to use for the padded signal to which the filter is applied. If `padtype` is None, no padding is used. The default is 'odd'. padlen : int or None, optional The number of elements by which to extend `x` at both ends of `axis` before applying the filter. This value must be less than ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. The default value is ``3 * max(len(a), len(b))``. method : str, optional Determines the method for handling the edges of the signal, either "pad" or "gust". When `method` is "pad", the signal is padded; the type of padding is determined by `padtype` and `padlen`, and `irlen` is ignored. When `method` is "gust", Gustafsson's method is used, and `padtype` and `padlen` are ignored. irlen : int or None, optional When `method` is "gust", `irlen` specifies the length of the impulse response of the filter. If `irlen` is None, no part of the impulse response is ignored. For a long signal, specifying `irlen` can significantly improve the performance of the filter. Returns ------- y : ndarray The filtered output with the same shape as `x`. See Also -------- sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt Notes ----- When `method` is "pad", the function pads the data along the given axis in one of three ways: odd, even or constant. The odd and even extensions have the corresponding symmetry about the end point of the data. The constant extension extends the data with the values at the end points. On both the forward and backward passes, the initial condition of the filter is found by using `lfilter_zi` and scaling it by the end point of the extended data. 
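That procedure can be reproduced by hand; the sketch below uses ``padtype=None`` so that no edge extension is involved (the filter and data are arbitrary):

>>> import numpy as np
>>> from scipy import signal
>>> b, a = signal.butter(4, 0.2)
>>> x = np.sin(np.linspace(0, 3, 50))
>>> zi = signal.lfilter_zi(b, a)
>>> fwd, _ = signal.lfilter(b, a, x, zi=zi * x[0])
>>> bwd, _ = signal.lfilter(b, a, fwd[::-1], zi=zi * fwd[-1])
>>> np.allclose(bwd[::-1], signal.filtfilt(b, a, x, padtype=None))
True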
When `method` is "gust", Gustafsson's method [1]_ is used. Initial conditions are chosen for the forward and backward passes so that the forward-backward filter gives the same result as the backward-forward filter. The option to use Gustaffson's method was added in scipy version 0.16.0. References ---------- .. [1] F. Gustaffson, "Determining the initial states in forward-backward filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992, 1996. Examples -------- The examples will use several functions from `scipy.signal`. >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt First we create a one second signal that is the sum of two pure sine waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz. >>> t = np.linspace(0, 1.0, 2001) >>> xlow = np.sin(2 * np.pi * 5 * t) >>> xhigh = np.sin(2 * np.pi * 250 * t) >>> x = xlow + xhigh Now create a lowpass Butterworth filter with a cutoff of 0.125 times the Nyquist frequency, or 125 Hz, and apply it to ``x`` with `filtfilt`. The result should be approximately ``xlow``, with no phase shift. >>> b, a = signal.butter(8, 0.125) >>> y = signal.filtfilt(b, a, x, padlen=150) >>> np.abs(y - xlow).max() 9.1086182074789912e-06 We get a fairly clean result for this artificial example because the odd extension is exact, and with the moderately long padding, the filter's transients have dissipated by the time the actual data is reached. In general, transient effects at the edges are unavoidable. The following example demonstrates the option ``method="gust"``. First, create a filter. >>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied. `sig` is a random input signal to be filtered. >>> rng = np.random.default_rng() >>> n = 60 >>> sig = rng.standard_normal(n)**3 + 3*rng.standard_normal(n).cumsum() Apply `filtfilt` to `sig`, once using the Gustafsson method, and once using padding, and plot the results for comparison. >>> fgust = signal.filtfilt(b, a, sig, method="gust") >>> fpad = signal.filtfilt(b, a, sig, padlen=50) >>> plt.plot(sig, 'k-', label='input') >>> plt.plot(fgust, 'b-', linewidth=4, label='gust') >>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad') >>> plt.legend(loc='best') >>> plt.show() The `irlen` argument can be used to improve the performance of Gustafsson's method. Estimate the impulse response length of the filter. >>> z, p, k = signal.tf2zpk(b, a) >>> eps = 1e-9 >>> r = np.max(np.abs(p)) >>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) >>> approx_impulse_len 137 Apply the filter to a longer signal, with and without the `irlen` argument. The difference between `y1` and `y2` is small. For long signals, using `irlen` gives a significant performance improvement. >>> x = rng.standard_normal(4000) >>> y1 = signal.filtfilt(b, a, x, method='gust') >>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len) >>> print(np.max(np.abs(y1 - y2))) 2.875334415008979e-10 """ b = np.atleast_1d(b) a = np.atleast_1d(a) x = np.asarray(x) if method not in ["pad", "gust"]: raise ValueError("method must be 'pad' or 'gust'.") if method == "gust": y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) return y # method == "pad" edge, ext = _validate_pad(padtype, padlen, x, axis, ntaps=max(len(a), len(b))) # Get the steady state of the filter's step response. zi = lfilter_zi(b, a) # Reshape zi and create x0 so that zi*x0 broadcasts # to the correct value for the 'zi' keyword argument # to lfilter. 
zi_shape = [1] * x.ndim zi_shape[axis] = zi.size zi = np.reshape(zi, zi_shape) x0 = axis_slice(ext, stop=1, axis=axis) # Forward filter. (y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0) # Backward filter. # Create y0 so zi*y0 broadcasts appropriately. y0 = axis_slice(y, start=-1, axis=axis) (y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0) # Reverse y. y = axis_reverse(y, axis=axis) if edge > 0: # Slice the actual signal from the extended signal. y = axis_slice(y, start=edge, stop=-edge, axis=axis) return y def _validate_pad(padtype, padlen, x, axis, ntaps): """Helper to validate padding for filtfilt""" if padtype not in ['even', 'odd', 'constant', None]: raise ValueError(("Unknown value '%s' given to padtype. padtype " "must be 'even', 'odd', 'constant', or None.") % padtype) if padtype is None: padlen = 0 if padlen is None: # Original padding; preserved for backwards compatibility. edge = ntaps * 3 else: edge = padlen # x's 'axis' dimension must be bigger than edge. if x.shape[axis] <= edge: raise ValueError("The length of the input vector x must be greater " "than padlen, which is %d." % edge) if padtype is not None and edge > 0: # Make an extension of length `edge` at each # end of the input array. if padtype == 'even': ext = even_ext(x, edge, axis=axis) elif padtype == 'odd': ext = odd_ext(x, edge, axis=axis) else: ext = const_ext(x, edge, axis=axis) else: ext = x return edge, ext def _validate_x(x): x = np.asarray(x) if x.ndim == 0: raise ValueError('x must be at least 1-D') return x def sosfilt(sos, x, axis=-1, zi=None): """ Filter data along one dimension using cascaded second-order sections. Filter a data sequence, `x`, using a digital IIR filter defined by `sos`. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. Each row corresponds to a second-order section, with the first three columns providing the numerator coefficients and the last three providing the denominator coefficients. x : array_like An N-dimensional input array. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. zi : array_like, optional Initial conditions for the cascaded filter delays. It is a (at least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where ``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]`` replaced by 2. If `zi` is None or is not given then initial rest (i.e. all zeros) is assumed. Note that these initial conditions are *not* the same as the initial conditions given by `lfiltic` or `lfilter_zi`. Returns ------- y : ndarray The output of the digital filter. zf : ndarray, optional If `zi` is None, this is not returned, otherwise, `zf` holds the final filter delay values. See Also -------- zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz Notes ----- The filter function is implemented as a series of second-order filters with direct-form II transposed structure. It is designed to minimize numerical precision errors for high-order filters. .. 
versionadded:: 0.16.0 Examples -------- Plot a 13th-order filter's impulse response using both `lfilter` and `sosfilt`, showing the instability that results from trying to do a 13th-order filter in a single stage (the numerical error pushes some poles outside of the unit circle): >>> import matplotlib.pyplot as plt >>> from scipy import signal >>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba') >>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos') >>> x = signal.unit_impulse(700) >>> y_tf = signal.lfilter(b, a, x) >>> y_sos = signal.sosfilt(sos, x) >>> plt.plot(y_tf, 'r', label='TF') >>> plt.plot(y_sos, 'k', label='SOS') >>> plt.legend(loc='best') >>> plt.show() """ x = _validate_x(x) sos, n_sections = _validate_sos(sos) x_zi_shape = list(x.shape) x_zi_shape[axis] = 2 x_zi_shape = tuple([n_sections] + x_zi_shape) inputs = [sos, x] if zi is not None: inputs.append(np.asarray(zi)) dtype = np.result_type(*inputs) if dtype.char not in 'fdgFDGO': raise NotImplementedError("input type '%s' not supported" % dtype) if zi is not None: zi = np.array(zi, dtype) # make a copy so that we can operate in place if zi.shape != x_zi_shape: raise ValueError('Invalid zi shape. With axis=%r, an input with ' 'shape %r, and an sos array with %d sections, zi ' 'must have shape %r, got %r.' % (axis, x.shape, n_sections, x_zi_shape, zi.shape)) return_zi = True else: zi = np.zeros(x_zi_shape, dtype=dtype) return_zi = False axis = axis % x.ndim # make positive x = np.moveaxis(x, axis, -1) zi = np.moveaxis(zi, [0, axis + 1], [-2, -1]) x_shape, zi_shape = x.shape, zi.shape x = np.reshape(x, (-1, x.shape[-1])) x = np.array(x, dtype, order='C') # make a copy, can modify in place zi = np.ascontiguousarray(np.reshape(zi, (-1, n_sections, 2))) sos = sos.astype(dtype, copy=False) _sosfilt(sos, x, zi) x.shape = x_shape x = np.moveaxis(x, -1, axis) if return_zi: zi.shape = zi_shape zi = np.moveaxis(zi, [-2, -1], [0, axis + 1]) out = (x, zi) else: out = x return out def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None): """ A forward-backward digital filter using cascaded second-order sections. See `filtfilt` for more complete information about this method. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. Each row corresponds to a second-order section, with the first three columns providing the numerator coefficients and the last three providing the denominator coefficients. x : array_like The array of data to be filtered. axis : int, optional The axis of `x` to which the filter is applied. Default is -1. padtype : str or None, optional Must be 'odd', 'even', 'constant', or None. This determines the type of extension to use for the padded signal to which the filter is applied. If `padtype` is None, no padding is used. The default is 'odd'. padlen : int or None, optional The number of elements by which to extend `x` at both ends of `axis` before applying the filter. This value must be less than ``x.shape[axis] - 1``. ``padlen=0`` implies no padding. The default value is:: 3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())) The extra subtraction at the end attempts to compensate for poles and zeros at the origin (e.g. for odd-order filters) to yield equivalent estimates of `padlen` to those of `filtfilt` for second-order section filters built with `scipy.signal` functions. Returns ------- y : ndarray The filtered output with the same shape as `x`. See Also -------- filtfilt, sosfilt, sosfilt_zi, sosfreqz Notes ----- .. 
versionadded:: 0.18.0 Examples -------- >>> import numpy as np >>> from scipy.signal import sosfiltfilt, butter >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Create an interesting signal to filter. >>> n = 201 >>> t = np.linspace(0, 1, n) >>> x = 1 + (t < 0.5) - 0.25*t**2 + 0.05*rng.standard_normal(n) Create a lowpass Butterworth filter, and use it to filter `x`. >>> sos = butter(4, 0.125, output='sos') >>> y = sosfiltfilt(sos, x) For comparison, apply an 8th order filter using `sosfilt`. The filter is initialized using the mean of the first four values of `x`. >>> from scipy.signal import sosfilt, sosfilt_zi >>> sos8 = butter(8, 0.125, output='sos') >>> zi = x[:4].mean() * sosfilt_zi(sos8) >>> y2, zo = sosfilt(sos8, x, zi=zi) Plot the results. Note that the phase of `y` matches the input, while `y2` has a significant phase delay. >>> plt.plot(t, x, alpha=0.5, label='x(t)') >>> plt.plot(t, y, label='y(t)') >>> plt.plot(t, y2, label='y2(t)') >>> plt.legend(framealpha=1, shadow=True) >>> plt.grid(alpha=0.25) >>> plt.xlabel('t') >>> plt.show() """ sos, n_sections = _validate_sos(sos) x = _validate_x(x) # `method` is "pad"... ntaps = 2 * n_sections + 1 ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum()) edge, ext = _validate_pad(padtype, padlen, x, axis, ntaps=ntaps) # These steps follow the same form as filtfilt with modifications zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...) zi_shape = [1] * x.ndim zi_shape[axis] = 2 zi.shape = [n_sections] + zi_shape x_0 = axis_slice(ext, stop=1, axis=axis) (y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0) y_0 = axis_slice(y, start=-1, axis=axis) (y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0) y = axis_reverse(y, axis=axis) if edge > 0: y = axis_slice(y, start=edge, stop=-edge, axis=axis) return y def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True): """ Downsample the signal after applying an anti-aliasing filter. By default, an order 8 Chebyshev type I filter is used. A 30 point FIR filter with Hamming window is used if `ftype` is 'fir'. Parameters ---------- x : array_like The signal to be downsampled, as an N-dimensional array. q : int The downsampling factor. When using IIR downsampling, it is recommended to call `decimate` multiple times for downsampling factors higher than 13. n : int, optional The order of the filter (1 less than the length for 'fir'). Defaults to 8 for 'iir' and 20 times the downsampling factor for 'fir'. ftype : str {'iir', 'fir'} or ``dlti`` instance, optional If 'iir' or 'fir', specifies the type of lowpass filter. If an instance of an `dlti` object, uses that object to filter before downsampling. axis : int, optional The axis along which to decimate. zero_phase : bool, optional Prevent phase shift by filtering with `filtfilt` instead of `lfilter` when using an IIR filter, and shifting the outputs back by the filter's group delay when using an FIR filter. The default value of ``True`` is recommended, since a phase shift is generally not desired. .. versionadded:: 0.18.0 Returns ------- y : ndarray The down-sampled signal. See Also -------- resample : Resample up or down using the FFT method. resample_poly : Resample using polyphase filtering and an FIR filter. Notes ----- The ``zero_phase`` keyword was added in 0.18.0. The possibility to use instances of ``dlti`` as ``ftype`` was added in 0.18.0. Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt Define wave parameters. 
>>> wave_duration = 3 >>> sample_rate = 100 >>> freq = 2 >>> q = 5 Calculate number of samples. >>> samples = wave_duration*sample_rate >>> samples_decimated = int(samples/q) Create cosine wave. >>> x = np.linspace(0, wave_duration, samples, endpoint=False) >>> y = np.cos(x*np.pi*freq*2) Decimate cosine wave. >>> ydem = signal.decimate(y, q) >>> xnew = np.linspace(0, wave_duration, samples_decimated, endpoint=False) Plot original and decimated waves. >>> plt.plot(x, y, '.-', xnew, ydem, 'o-') >>> plt.xlabel('Time, Seconds') >>> plt.legend(['data', 'decimated'], loc='best') >>> plt.show() """ x = np.asarray(x) q = operator.index(q) if n is not None: n = operator.index(n) result_type = x.dtype if not np.issubdtype(result_type, np.inexact) \ or result_type.type == np.float16: # upcast integers and float16 to float64 result_type = np.float64 if ftype == 'fir': if n is None: half_len = 10 * q # reasonable cutoff for our sinc-like function n = 2 * half_len b, a = firwin(n+1, 1. / q, window='hamming'), 1. b = np.asarray(b, dtype=result_type) a = np.asarray(a, dtype=result_type) elif ftype == 'iir': iir_use_sos = True if n is None: n = 8 sos = cheby1(n, 0.05, 0.8 / q, output='sos') sos = np.asarray(sos, dtype=result_type) elif isinstance(ftype, dlti): system = ftype._as_zpk() if system.poles.shape[0] == 0: # FIR system = ftype._as_tf() b, a = system.num, system.den ftype = 'fir' elif (any(np.iscomplex(system.poles)) or any(np.iscomplex(system.poles)) or np.iscomplex(system.gain)): # sosfilt & sosfiltfilt don't handle complex coeffs iir_use_sos = False system = ftype._as_tf() b, a = system.num, system.den else: iir_use_sos = True sos = zpk2sos(system.zeros, system.poles, system.gain) sos = np.asarray(sos, dtype=result_type) else: raise ValueError('invalid ftype') sl = [slice(None)] * x.ndim if ftype == 'fir': b = b / a if zero_phase: y = resample_poly(x, 1, q, axis=axis, window=b) else: # upfirdn is generally faster than lfilter by a factor equal to the # downsampling factor, since it only calculates the needed outputs n_out = x.shape[axis] // q + bool(x.shape[axis] % q) y = upfirdn(b, x, up=1, down=q, axis=axis) sl[axis] = slice(None, n_out, None) else: # IIR case if zero_phase: if iir_use_sos: y = sosfiltfilt(sos, x, axis=axis) else: y = filtfilt(b, a, x, axis=axis) else: if iir_use_sos: y = sosfilt(sos, x, axis=axis) else: y = lfilter(b, a, x, axis=axis) sl[axis] = slice(None, None, q) return y[tuple(sl)]
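# ----------------------------------------------------------------------------
# Illustrative sketch (not part of scipy's API): the `decimate` docstring
# above recommends applying the IIR decimator in several smaller stages when
# the downsampling factor is larger than 13.  The hypothetical helper below
# simply chains calls to `decimate`, e.g. ``factors=(8, 8)`` instead of a
# single ``q=64``.
def _decimate_by_stages_example(x, factors, **kwargs):
    """Apply `decimate` once per factor in `factors` (example only)."""
    for q in factors:
        x = decimate(x, q, **kwargs)
    return x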
file_length: 157611
avg_line_length: 33.05618
max_line_length: 85
extension_type: py
repo: scipy
file: scipy-main/scipy/signal/_spectral_py.py
"""Tools for spectral analysis. """ import numpy as np from scipy import fft as sp_fft from . import _signaltools from .windows import get_window from ._spectral import _lombscargle from ._arraytools import const_ext, even_ext, odd_ext, zero_ext import warnings __all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence', 'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA'] def lombscargle(x, y, freqs, precenter=False, normalize=False): """ lombscargle(x, y, freqs) Computes the Lomb-Scargle periodogram. The Lomb-Scargle periodogram was developed by Lomb [1]_ and further extended by Scargle [2]_ to find, and test the significance of weak periodic signals with uneven temporal sampling. When *normalize* is False (default) the computed periodogram is unnormalized, it takes the value ``(A**2) * N/4`` for a harmonic signal with amplitude A for sufficiently large N. When *normalize* is True the computed periodogram is normalized by the residuals of the data around a constant reference model (at zero). Input arrays should be 1-D and will be cast to float64. Parameters ---------- x : array_like Sample times. y : array_like Measurement values. freqs : array_like Angular frequencies for output periodogram. precenter : bool, optional Pre-center measurement values by subtracting the mean. normalize : bool, optional Compute normalized periodogram. Returns ------- pgram : array_like Lomb-Scargle periodogram. Raises ------ ValueError If the input arrays `x` and `y` do not have the same shape. See Also -------- istft: Inverse Short Time Fourier Transform check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met welch: Power spectral density by Welch's method spectrogram: Spectrogram by Welch's method csd: Cross spectral density by Welch's method Notes ----- This subroutine calculates the periodogram using a slightly modified algorithm due to Townsend [3]_ which allows the periodogram to be calculated using only a single pass through the input arrays for each frequency. The algorithm running time scales roughly as O(x * freqs) or O(N^2) for a large number of samples and frequencies. References ---------- .. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976 .. [2] J.D. Scargle "Studies in astronomical time series analysis. II - Statistical aspects of spectral analysis of unevenly spaced data", The Astrophysical Journal, vol 263, pp. 835-853, 1982 .. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle periodogram using graphics processing units.", The Astrophysical Journal Supplement Series, vol 191, pp. 247-253, 2010 Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() First define some input parameters for the signal: >>> A = 2. >>> w0 = 1. 
# rad/sec >>> nin = 150 >>> nout = 100000 Randomly generate sample times: >>> x = rng.uniform(0, 10*np.pi, nin) Plot a sine wave for the selected times: >>> y = A * np.cos(w0*x) Define the array of frequencies for which to compute the periodogram: >>> w = np.linspace(0.01, 10, nout) Calculate Lomb-Scargle periodogram: >>> import scipy.signal as signal >>> pgram = signal.lombscargle(x, y, w, normalize=True) Now make a plot of the input data: >>> fig, (ax_t, ax_w) = plt.subplots(2, 1, constrained_layout=True) >>> ax_t.plot(x, y, 'b+') >>> ax_t.set_xlabel('Time [s]') Then plot the normalized periodogram: >>> ax_w.plot(w, pgram) >>> ax_w.set_xlabel('Angular frequency [rad/s]') >>> ax_w.set_ylabel('Normalized amplitude') >>> plt.show() """ x = np.ascontiguousarray(x, dtype=np.float64) y = np.ascontiguousarray(y, dtype=np.float64) freqs = np.ascontiguousarray(freqs, dtype=np.float64) assert x.ndim == 1 assert y.ndim == 1 assert freqs.ndim == 1 if precenter: pgram = _lombscargle(x, y - y.mean(), freqs) else: pgram = _lombscargle(x, y, freqs) if normalize: pgram *= 2 / np.dot(y, y) return pgram def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1): """ Estimate power spectral density using a periodogram. Parameters ---------- x : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be equal to the length of the axis over which the periodogram is computed. Defaults to 'boxcar'. nfft : int, optional Length of the FFT used. If `None` the length of `x` will be used. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Defaults to `True`, but for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the power spectral density ('density') where `Pxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Pxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the periodogram is computed; the default is over the last axis (i.e. ``axis=-1``). Returns ------- f : ndarray Array of sample frequencies. Pxx : ndarray Power spectral density or power spectrum of `x`. See Also -------- welch: Estimate power spectral density using Welch's method lombscargle: Lomb-Scargle periodogram for unevenly sampled data Notes ----- .. versionadded:: 0.12.0 Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by 0.001 V**2/Hz of white noise sampled at 10 kHz. 
>>> fs = 10e3 >>> N = 1e5 >>> amp = 2*np.sqrt(2) >>> freq = 1234.0 >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / fs >>> x = amp*np.sin(2*np.pi*freq*time) >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape) Compute and plot the power spectral density. >>> f, Pxx_den = signal.periodogram(x, fs) >>> plt.semilogy(f, Pxx_den) >>> plt.ylim([1e-7, 1e2]) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('PSD [V**2/Hz]') >>> plt.show() If we average the last half of the spectral density, to exclude the peak, we can recover the noise power on the signal. >>> np.mean(Pxx_den[25000:]) 0.000985320699252543 Now compute and plot the power spectrum. >>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum') >>> plt.figure() >>> plt.semilogy(f, np.sqrt(Pxx_spec)) >>> plt.ylim([1e-4, 1e1]) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('Linear spectrum [V RMS]') >>> plt.show() The peak height in the power spectrum is an estimate of the RMS amplitude. >>> np.sqrt(Pxx_spec.max()) 2.0077340678640727 """ x = np.asarray(x) if x.size == 0: return np.empty(x.shape), np.empty(x.shape) if window is None: window = 'boxcar' if nfft is None: nperseg = x.shape[axis] elif nfft == x.shape[axis]: nperseg = nfft elif nfft > x.shape[axis]: nperseg = x.shape[axis] elif nfft < x.shape[axis]: s = [np.s_[:]]*len(x.shape) s[axis] = np.s_[:nfft] x = x[tuple(s)] nperseg = nfft nfft = None if hasattr(window, 'size'): if window.size != nperseg: raise ValueError('the size of the window must be the same size ' 'of the input on the specified axis') return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0, nfft=nfft, detrend=detrend, return_onesided=return_onesided, scaling=scaling, axis=axis) def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, average='mean'): r""" Estimate power spectral density using Welch's method. Welch's method [1]_ computes an estimate of the power spectral density by dividing the data into overlapping segments, computing a modified periodogram for each segment and averaging the periodograms. Parameters ---------- x : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap : int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. 
If `False` return a two-sided spectrum. Defaults to `True`, but for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the power spectral density ('density') where `Pxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Pxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the periodogram is computed; the default is over the last axis (i.e. ``axis=-1``). average : { 'mean', 'median' }, optional Method to use when averaging periodograms. Defaults to 'mean'. .. versionadded:: 1.2.0 Returns ------- f : ndarray Array of sample frequencies. Pxx : ndarray Power spectral density or power spectrum of x. See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data Notes ----- An appropriate amount of overlap will depend on the choice of window and on your requirements. For the default Hann window an overlap of 50% is a reasonable trade off between accurately estimating the signal power, while not over counting any of the data. Narrower windows may require a larger overlap. If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_. .. versionadded:: 0.12.0 References ---------- .. [1] P. Welch, "The use of the fast Fourier transform for the estimation of power spectra: A method based on time averaging over short, modified periodograms", IEEE Trans. Audio Electroacoust. vol. 15, pp. 70-73, 1967. .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika, vol. 37, pp. 1-16, 1950. Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by 0.001 V**2/Hz of white noise sampled at 10 kHz. >>> fs = 10e3 >>> N = 1e5 >>> amp = 2*np.sqrt(2) >>> freq = 1234.0 >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / fs >>> x = amp*np.sin(2*np.pi*freq*time) >>> x += rng.normal(scale=np.sqrt(noise_power), size=time.shape) Compute and plot the power spectral density. >>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) >>> plt.semilogy(f, Pxx_den) >>> plt.ylim([0.5e-3, 1]) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('PSD [V**2/Hz]') >>> plt.show() If we average the last half of the spectral density, to exclude the peak, we can recover the noise power on the signal. >>> np.mean(Pxx_den[256:]) 0.0009924865443739191 Now compute and plot the power spectrum. >>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum') >>> plt.figure() >>> plt.semilogy(f, np.sqrt(Pxx_spec)) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('Linear spectrum [V RMS]') >>> plt.show() The peak height in the power spectrum is an estimate of the RMS amplitude. >>> np.sqrt(Pxx_spec.max()) 2.0077340678640727 If we now introduce a discontinuity in the signal, by increasing the amplitude of a small portion of the signal by 50, we can see the corruption of the mean average power spectral density, but using a median average better estimates the normal behaviour. >>> x[int(N//2):int(N//2)+10] *= 50. 
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024) >>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median') >>> plt.semilogy(f, Pxx_den, label='mean') >>> plt.semilogy(f_med, Pxx_den_med, label='median') >>> plt.ylim([0.5e-3, 1]) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('PSD [V**2/Hz]') >>> plt.legend() >>> plt.show() """ freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=return_onesided, scaling=scaling, axis=axis, average=average) return freqs, Pxx.real def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, average='mean'): r""" Estimate the cross power spectral density, Pxy, using Welch's method. Parameters ---------- x : array_like Time series of measurement values y : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` and `y` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap: int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Defaults to `True`, but for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the cross spectral density ('density') where `Pxy` has units of V**2/Hz and computing the cross spectrum ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are measured in V and `fs` is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the CSD is computed for both inputs; the default is over the last axis (i.e. ``axis=-1``). average : { 'mean', 'median' }, optional Method to use when averaging periodograms. If the spectrum is complex, the average is computed separately for the real and imaginary parts. Defaults to 'mean'. .. versionadded:: 1.2.0 Returns ------- f : ndarray Array of sample frequencies. Pxy : ndarray Cross spectral density or cross power spectrum of x,y. See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)] coherence: Magnitude squared coherence by Welch's method. Notes ----- By convention, Pxy is computed with the conjugate FFT of X multiplied by the FFT of Y. 
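    As a quick consistency check of this convention (an illustrative sketch,
    not part of the original notes), swapping the two inputs conjugates the
    estimate:

    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.default_rng()
    >>> x = rng.standard_normal(2048)
    >>> y = np.roll(x, 10)
    >>> _, Pxy = signal.csd(x, y, nperseg=256)
    >>> _, Pyx = signal.csd(y, x, nperseg=256)
    >>> np.allclose(Pxy, np.conj(Pyx))
    True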
If the input series differ in length, the shorter series will be zero-padded to match. An appropriate amount of overlap will depend on the choice of window and on your requirements. For the default Hann window an overlap of 50% is a reasonable trade off between accurately estimating the signal power, while not over counting any of the data. Narrower windows may require a larger overlap. .. versionadded:: 0.16.0 References ---------- .. [1] P. Welch, "The use of the fast Fourier transform for the estimation of power spectra: A method based on time averaging over short, modified periodograms", IEEE Trans. Audio Electroacoust. vol. 15, pp. 70-73, 1967. .. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975 Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate two test signals with some common features. >>> fs = 10e3 >>> N = 1e5 >>> amp = 20 >>> freq = 1234.0 >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / fs >>> b, a = signal.butter(2, 0.25, 'low') >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape) >>> y = signal.lfilter(b, a, x) >>> x += amp*np.sin(2*np.pi*freq*time) >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) Compute and plot the magnitude of the cross spectral density. >>> f, Pxy = signal.csd(x, y, fs, nperseg=1024) >>> plt.semilogy(f, np.abs(Pxy)) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('CSD [V**2/Hz]') >>> plt.show() """ freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='psd') # Average over windows. if len(Pxy.shape) >= 2 and Pxy.size > 0: if Pxy.shape[-1] > 1: if average == 'median': # np.median must be passed real arrays for the desired result bias = _median_bias(Pxy.shape[-1]) if np.iscomplexobj(Pxy): Pxy = (np.median(np.real(Pxy), axis=-1) + 1j * np.median(np.imag(Pxy), axis=-1)) else: Pxy = np.median(Pxy, axis=-1) Pxy /= bias elif average == 'mean': Pxy = Pxy.mean(axis=-1) else: raise ValueError('average must be "median" or "mean", got %s' % (average,)) else: Pxy = np.reshape(Pxy, Pxy.shape[:-1]) return freqs, Pxy def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, mode='psd'): """Compute a spectrogram with consecutive Fourier transforms. Spectrograms can be used as a way of visualizing the change of a nonstationary signal's frequency content over time. .. legacy:: function :class:`ShortTimeFFT` is a newer STFT / ISTFT implementation with more features also including a :meth:`~ShortTimeFFT.spectrogram` method. A :ref:`comparison <tutorial_stft_legacy_stft>` between the implementations can be found in the :ref:`tutorial_stft` section of the :ref:`user_guide`. Parameters ---------- x : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Tukey window with shape parameter of 0.25. nperseg : int, optional Length of each segment. 
Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap : int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 8``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Defaults to `True`, but for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the power spectral density ('density') where `Sxx` has units of V**2/Hz and computing the power spectrum ('spectrum') where `Sxx` has units of V**2, if `x` is measured in V and `fs` is measured in Hz. Defaults to 'density'. axis : int, optional Axis along which the spectrogram is computed; the default is over the last axis (i.e. ``axis=-1``). mode : str, optional Defines what kind of return values are expected. Options are ['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is equivalent to the output of `stft` with no padding or boundary extension. 'magnitude' returns the absolute magnitude of the STFT. 'angle' and 'phase' return the complex angle of the STFT, with and without unwrapping, respectively. Returns ------- f : ndarray Array of sample frequencies. t : ndarray Array of segment times. Sxx : ndarray Spectrogram of x. By default, the last axis of Sxx corresponds to the segment times. See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data welch: Power spectral density by Welch's method. csd: Cross spectral density by Welch's method. ShortTimeFFT: Newer STFT/ISTFT implementation providing more features, which also includes a :meth:`~ShortTimeFFT.spectrogram` method. Notes ----- An appropriate amount of overlap will depend on the choice of window and on your requirements. In contrast to welch's method, where the entire data stream is averaged over, one may wish to use a smaller overlap (or perhaps none at all) when computing a spectrogram, to maintain some statistical independence between individual segments. It is for this reason that the default window is a Tukey window with 1/8th of a window's length overlap at each end. .. versionadded:: 0.16.0 References ---------- .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time Signal Processing", Prentice Hall, 1999. Examples -------- >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fftshift >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate a test signal, a 2 Vrms sine wave whose frequency is slowly modulated around 3kHz, corrupted by white noise of exponentially decreasing magnitude sampled at 10 kHz. 
>>> fs = 10e3 >>> N = 1e5 >>> amp = 2 * np.sqrt(2) >>> noise_power = 0.01 * fs / 2 >>> time = np.arange(N) / float(fs) >>> mod = 500*np.cos(2*np.pi*0.25*time) >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) >>> noise = rng.normal(scale=np.sqrt(noise_power), size=time.shape) >>> noise *= np.exp(-time/5) >>> x = carrier + noise Compute and plot the spectrogram. >>> f, t, Sxx = signal.spectrogram(x, fs) >>> plt.pcolormesh(t, f, Sxx, shading='gouraud') >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.show() Note, if using output that is not one sided, then use the following: >>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False) >>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0), shading='gouraud') >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.show() """ modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase'] if mode not in modelist: raise ValueError('unknown value for mode {}, must be one of {}' .format(mode, modelist)) # need to set default for nperseg before setting default for noverlap below window, nperseg = _triage_segments(window, nperseg, input_length=x.shape[axis]) # Less overlap than welch, so samples are more statisically independent if noverlap is None: noverlap = nperseg // 8 if mode == 'psd': freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='psd') else: freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='stft') if mode == 'magnitude': Sxx = np.abs(Sxx) elif mode in ['angle', 'phase']: Sxx = np.angle(Sxx) if mode == 'phase': # Sxx has one additional dimension for time strides if axis < 0: axis -= 1 Sxx = np.unwrap(Sxx, axis=axis) # mode =='complex' is same as `stft`, doesn't need modification return freqs, time, Sxx def check_COLA(window, nperseg, noverlap, tol=1e-10): r"""Check whether the Constant OverLap Add (COLA) constraint is met. Parameters ---------- window : str or tuple or array_like Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. nperseg : int Length of each segment. noverlap : int Number of points to overlap between segments. tol : float, optional The allowed variance of a bin's weighted sum from the median bin sum. Returns ------- verdict : bool `True` if chosen combination satisfies COLA within `tol`, `False` otherwise See Also -------- check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met stft: Short Time Fourier Transform istft: Inverse Short Time Fourier Transform Notes ----- In order to enable inversion of an STFT via the inverse STFT in `istft`, it is sufficient that the signal windowing obeys the constraint of "Constant OverLap Add" (COLA). This ensures that every point in the input data is equally weighted, thereby avoiding aliasing and allowing full reconstruction. Some examples of windows that satisfy COLA: - Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ... - Bartlett window at overlap of 1/2, 3/4, 5/6, ... - Hann window at 1/2, 2/3, 3/4, ... 
- Any Blackman family window at 2/3 overlap - Any window with ``noverlap = nperseg-1`` A very comprehensive list of other windows may be found in [2]_, wherein the COLA condition is satisfied when the "Amplitude Flatness" is unity. .. versionadded:: 0.19.0 References ---------- .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K Publishing, 2011,ISBN 978-0-9745607-3-1. .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new at-top windows", 2002, http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 Examples -------- >>> from scipy import signal Confirm COLA condition for rectangular window of 75% (3/4) overlap: >>> signal.check_COLA(signal.windows.boxcar(100), 100, 75) True COLA is not true for 25% (1/4) overlap, though: >>> signal.check_COLA(signal.windows.boxcar(100), 100, 25) False "Symmetrical" Hann window (for filter design) is not COLA: >>> signal.check_COLA(signal.windows.hann(120, sym=True), 120, 60) False "Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for overlap of 1/2, 2/3, 3/4, etc.: >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 60) True >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 80) True >>> signal.check_COLA(signal.windows.hann(120, sym=False), 120, 90) True """ nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') noverlap = int(noverlap) if isinstance(window, str) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] != nperseg: raise ValueError('window must have length of nperseg') step = nperseg - noverlap binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step)) if nperseg % step != 0: binsums[:nperseg % step] += win[-(nperseg % step):] deviation = binsums - np.median(binsums) return np.max(np.abs(deviation)) < tol def check_NOLA(window, nperseg, noverlap, tol=1e-10): r"""Check whether the Nonzero Overlap Add (NOLA) constraint is met. Parameters ---------- window : str or tuple or array_like Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. nperseg : int Length of each segment. noverlap : int Number of points to overlap between segments. tol : float, optional The allowed variance of a bin's weighted sum from the median bin sum. Returns ------- verdict : bool `True` if chosen combination satisfies the NOLA constraint within `tol`, `False` otherwise See Also -------- check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met stft: Short Time Fourier Transform istft: Inverse Short Time Fourier Transform Notes ----- In order to enable inversion of an STFT via the inverse STFT in `istft`, the signal windowing must obey the constraint of "nonzero overlap add" (NOLA): .. math:: \sum_{t}w^{2}[n-tH] \ne 0 for all :math:`n`, where :math:`w` is the window function, :math:`t` is the frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` - `noverlap`). 
This ensures that the normalization factors in the denominator of the overlap-add inversion equation are not zero. Only very pathological windows will fail the NOLA constraint. .. versionadded:: 1.2.0 References ---------- .. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K Publishing, 2011,ISBN 978-0-9745607-3-1. .. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new at-top windows", 2002, http://hdl.handle.net/11858/00-001M-0000-0013-557A-5 Examples -------- >>> import numpy as np >>> from scipy import signal Confirm NOLA condition for rectangular window of 75% (3/4) overlap: >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 75) True NOLA is also true for 25% (1/4) overlap: >>> signal.check_NOLA(signal.windows.boxcar(100), 100, 25) True "Symmetrical" Hann window (for filter design) is also NOLA: >>> signal.check_NOLA(signal.windows.hann(120, sym=True), 120, 60) True As long as there is overlap, it takes quite a pathological window to fail NOLA: >>> w = np.ones(64, dtype="float") >>> w[::2] = 0 >>> signal.check_NOLA(w, 64, 32) False If there is not enough overlap, a window with zeros at the ends will not work: >>> signal.check_NOLA(signal.windows.hann(64), 64, 0) False >>> signal.check_NOLA(signal.windows.hann(64), 64, 1) False >>> signal.check_NOLA(signal.windows.hann(64), 64, 2) True """ nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg') if noverlap < 0: raise ValueError('noverlap must be a nonnegative integer') noverlap = int(noverlap) if isinstance(window, str) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] != nperseg: raise ValueError('window must have length of nperseg') step = nperseg - noverlap binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step)) if nperseg % step != 0: binsums[:nperseg % step] += win[-(nperseg % step):]**2 return np.min(binsums) > tol def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, detrend=False, return_onesided=True, boundary='zeros', padded=True, axis=-1, scaling='spectrum'): r"""Compute the Short Time Fourier Transform (STFT). STFTs can be used as a way of quantifying the change of a nonstationary signal's frequency and phase content over time. .. legacy:: function `ShortTimeFFT` is a newer STFT / ISTFT implementation with more features. A :ref:`comparison <tutorial_stft_legacy_stft>` between the implementations can be found in the :ref:`tutorial_stft` section of the :ref:`user_guide`. Parameters ---------- x : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to 256. noverlap : int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. 
When specified, the COLA constraint must be met (see Notes below). nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to `False`. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Defaults to `True`, but for complex data, a two-sided spectrum is always returned. boundary : str or None, optional Specifies whether the input signal is extended at both ends, and how to generate the new values, in order to center the first windowed segment on the first input point. This has the benefit of enabling reconstruction of the first input point when the employed window function starts at zero. Valid options are ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to 'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``. padded : bool, optional Specifies whether the input signal is zero-padded at the end to make the signal fit exactly into an integer number of window segments, so that all of the signal is included in the output. Defaults to `True`. Padding occurs after boundary extension, if `boundary` is not `None`, and `padded` is `True`, as is the default. axis : int, optional Axis along which the STFT is computed; the default is over the last axis (i.e. ``axis=-1``). scaling: {'spectrum', 'psd'} The default 'spectrum' scaling allows each frequency line of `Zxx` to be interpreted as a magnitude spectrum. The 'psd' option scales each line to a power spectral density - it allows to calculate the signal's energy by numerically integrating over ``abs(Zxx)**2``. .. versionadded:: 1.9.0 Returns ------- f : ndarray Array of sample frequencies. t : ndarray Array of segment times. Zxx : ndarray STFT of `x`. By default, the last axis of `Zxx` corresponds to the segment times. See Also -------- istft: Inverse Short Time Fourier Transform ShortTimeFFT: Newer STFT/ISTFT implementation providing more features. check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met welch: Power spectral density by Welch's method. spectrogram: Spectrogram by Welch's method. csd: Cross spectral density by Welch's method. lombscargle: Lomb-Scargle periodogram for unevenly sampled data Notes ----- In order to enable inversion of an STFT via the inverse STFT in `istft`, the signal windowing must obey the constraint of "Nonzero OverLap Add" (NOLA), and the input signal must have complete windowing coverage (i.e. ``(x.shape[axis] - nperseg) % (nperseg-noverlap) == 0``). The `padded` argument may be used to accomplish this. Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop size :math:`H` = `nperseg - noverlap`, the windowed frame at time index :math:`t` is given by .. math:: x_{t}[n]=x[n]w[n-tH] The overlap-add (OLA) reconstruction equation is given by .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} The NOLA constraint ensures that every normalization term that appears in the denomimator of the OLA reconstruction equation is nonzero. 
Whether a choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can be tested with `check_NOLA`. .. versionadded:: 0.19.0 References ---------- .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time Signal Processing", Prentice Hall, 1999. .. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from Modified Short-Time Fourier Transform", IEEE 1984, 10.1109/TASSP.1984.1164317 Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate a test signal, a 2 Vrms sine wave whose frequency is slowly modulated around 3kHz, corrupted by white noise of exponentially decreasing magnitude sampled at 10 kHz. >>> fs = 10e3 >>> N = 1e5 >>> amp = 2 * np.sqrt(2) >>> noise_power = 0.01 * fs / 2 >>> time = np.arange(N) / float(fs) >>> mod = 500*np.cos(2*np.pi*0.25*time) >>> carrier = amp * np.sin(2*np.pi*3e3*time + mod) >>> noise = rng.normal(scale=np.sqrt(noise_power), ... size=time.shape) >>> noise *= np.exp(-time/5) >>> x = carrier + noise Compute and plot the STFT's magnitude. >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000) >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') >>> plt.title('STFT Magnitude') >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.show() Compare the energy of the signal `x` with the energy of its STFT: >>> E_x = sum(x**2) / fs # Energy of x >>> # Calculate a two-sided STFT with PSD scaling: >>> f, t, Zxx = signal.stft(x, fs, nperseg=1000, return_onesided=False, ... scaling='psd') >>> # Integrate numerically over abs(Zxx)**2: >>> df, dt = f[1] - f[0], t[1] - t[0] >>> E_Zxx = sum(np.sum(Zxx.real**2 + Zxx.imag**2, axis=0) * df) * dt >>> # The energy is the same, but the numerical errors are quite large: >>> np.isclose(E_x, E_Zxx, rtol=1e-2) True """ if scaling == 'psd': scaling = 'density' elif scaling != 'spectrum': raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling=scaling, axis=axis, mode='stft', boundary=boundary, padded=padded) return freqs, time, Zxx def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2, scaling='spectrum'): r"""Perform the inverse Short Time Fourier transform (iSTFT). .. legacy:: function `ShortTimeFFT` is a newer STFT / ISTFT implementation with more features. A :ref:`comparison <tutorial_stft_legacy_stft>` between the implementations can be found in the :ref:`tutorial_stft` section of the :ref:`user_guide`. Parameters ---------- Zxx : array_like STFT of the signal to be reconstructed. If a purely real array is passed, it will be cast to a complex data type. fs : float, optional Sampling frequency of the time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. Must match the window used to generate the STFT for faithful inversion. nperseg : int, optional Number of data points corresponding to each STFT segment. 
This parameter must be specified if the number of data points per segment is odd, or if the STFT was padded via ``nfft > nperseg``. If `None`, the value depends on the shape of `Zxx` and `input_onesided`. If `input_onesided` is `True`, ``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise, ``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`. noverlap : int, optional Number of points to overlap between segments. If `None`, half of the segment length. Defaults to `None`. When specified, the COLA constraint must be met (see Notes below), and should match the parameter used to generate the STFT. Defaults to `None`. nfft : int, optional Number of FFT points corresponding to each STFT segment. This parameter must be specified if the STFT was padded via ``nfft > nperseg``. If `None`, the default values are the same as for `nperseg`, detailed above, with one exception: if `input_onesided` is True and ``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on that value. This case allows the proper inversion of an odd-length unpadded STFT using ``nfft=None``. Defaults to `None`. input_onesided : bool, optional If `True`, interpret the input array as one-sided FFTs, such as is returned by `stft` with ``return_onesided=True`` and `numpy.fft.rfft`. If `False`, interpret the input as a a two-sided FFT. Defaults to `True`. boundary : bool, optional Specifies whether the input signal was extended at its boundaries by supplying a non-`None` ``boundary`` argument to `stft`. Defaults to `True`. time_axis : int, optional Where the time segments of the STFT is located; the default is the last axis (i.e. ``axis=-1``). freq_axis : int, optional Where the frequency axis of the STFT is located; the default is the penultimate axis (i.e. ``axis=-2``). scaling: {'spectrum', 'psd'} The default 'spectrum' scaling allows each frequency line of `Zxx` to be interpreted as a magnitude spectrum. The 'psd' option scales each line to a power spectral density - it allows to calculate the signal's energy by numerically integrating over ``abs(Zxx)**2``. Returns ------- t : ndarray Array of output data times. x : ndarray iSTFT of `Zxx`. See Also -------- stft: Short Time Fourier Transform ShortTimeFFT: Newer STFT/ISTFT implementation providing more features. check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met Notes ----- In order to enable inversion of an STFT via the inverse STFT with `istft`, the signal windowing must obey the constraint of "nonzero overlap add" (NOLA): .. math:: \sum_{t}w^{2}[n-tH] \ne 0 This ensures that the normalization factors that appear in the denominator of the overlap-add reconstruction equation .. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]} are not zero. The NOLA constraint can be checked with the `check_NOLA` function. An STFT which has been modified (via masking or otherwise) is not guaranteed to correspond to a exactly realizible signal. This function implements the iSTFT via the least-squares estimation algorithm detailed in [2]_, which produces a signal that minimizes the mean squared error between the STFT of the returned signal and the modified STFT. .. versionadded:: 0.19.0 References ---------- .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time Signal Processing", Prentice Hall, 1999. .. [2] Daniel W. Griffin, Jae S. 
Lim "Signal Estimation from Modified Short-Time Fourier Transform", IEEE 1984, 10.1109/TASSP.1984.1164317 Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by 0.001 V**2/Hz of white noise sampled at 1024 Hz. >>> fs = 1024 >>> N = 10*fs >>> nperseg = 512 >>> amp = 2 * np.sqrt(2) >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / float(fs) >>> carrier = amp * np.sin(2*np.pi*50*time) >>> noise = rng.normal(scale=np.sqrt(noise_power), ... size=time.shape) >>> x = carrier + noise Compute the STFT, and plot its magnitude >>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg) >>> plt.figure() >>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp, shading='gouraud') >>> plt.ylim([f[1], f[-1]]) >>> plt.title('STFT Magnitude') >>> plt.ylabel('Frequency [Hz]') >>> plt.xlabel('Time [sec]') >>> plt.yscale('log') >>> plt.show() Zero the components that are 10% or less of the carrier magnitude, then convert back to a time series via inverse STFT >>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0) >>> _, xrec = signal.istft(Zxx, fs) Compare the cleaned signal with the original and true carrier signals. >>> plt.figure() >>> plt.plot(time, x, time, xrec, time, carrier) >>> plt.xlim([2, 2.1]) >>> plt.xlabel('Time [sec]') >>> plt.ylabel('Signal') >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) >>> plt.show() Note that the cleaned signal does not start as abruptly as the original, since some of the coefficients of the transient were also removed: >>> plt.figure() >>> plt.plot(time, x, time, xrec, time, carrier) >>> plt.xlim([0, 0.1]) >>> plt.xlabel('Time [sec]') >>> plt.ylabel('Signal') >>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier']) >>> plt.show() """ # Make sure input is an ndarray of appropriate complex dtype Zxx = np.asarray(Zxx) + 0j freq_axis = int(freq_axis) time_axis = int(time_axis) if Zxx.ndim < 2: raise ValueError('Input stft must be at least 2d!') if freq_axis == time_axis: raise ValueError('Must specify differing time and frequency axes!') nseg = Zxx.shape[time_axis] if input_onesided: # Assume even segment length n_default = 2*(Zxx.shape[freq_axis] - 1) else: n_default = Zxx.shape[freq_axis] # Check windowing parameters if nperseg is None: nperseg = n_default else: nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if nfft is None: if (input_onesided) and (nperseg == n_default + 1): # Odd nperseg, no FFT padding nfft = nperseg else: nfft = n_default elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg//2 else: noverlap = int(noverlap) if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') nstep = nperseg - noverlap # Rearrange axes if necessary if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2: # Turn negative indices to positive for the call to transpose if freq_axis < 0: freq_axis = Zxx.ndim + freq_axis if time_axis < 0: time_axis = Zxx.ndim + time_axis zouter = list(range(Zxx.ndim)) for ax in sorted([time_axis, freq_axis], reverse=True): zouter.pop(ax) Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis]) # Get window as array if isinstance(window, str) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] 
!= nperseg: raise ValueError(f'window must have length of {nperseg}') ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :] # Initialize output and normalization arrays outputlength = nperseg + (nseg-1)*nstep x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype) norm = np.zeros(outputlength, dtype=xsubs.dtype) if np.result_type(win, xsubs) != xsubs.dtype: win = win.astype(xsubs.dtype) if scaling == 'spectrum': xsubs *= win.sum() elif scaling == 'psd': xsubs *= np.sqrt(fs * sum(win**2)) else: raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") # Construct the output from the ifft segments # This loop could perhaps be vectorized/strided somehow... for ii in range(nseg): # Window the ifft x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win norm[..., ii*nstep:ii*nstep+nperseg] += win**2 # Remove extension points if boundary: x = x[..., nperseg//2:-(nperseg//2)] norm = norm[..., nperseg//2:-(nperseg//2)] # Divide out normalization where non-tiny if np.sum(norm > 1e-10) != len(norm): warnings.warn( "NOLA condition failed, STFT may not be invertible." + (" Possibly due to missing boundary" if not boundary else "") ) x /= np.where(norm > 1e-10, norm, 1.0) if input_onesided: x = x.real # Put axes back if x.ndim > 1: if time_axis != Zxx.ndim-1: if freq_axis < time_axis: time_axis -= 1 x = np.moveaxis(x, -1, time_axis) time = np.arange(x.shape[0])/float(fs) return time, x def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', axis=-1): r""" Estimate the magnitude squared coherence estimate, Cxy, of discrete-time signals X and Y using Welch's method. ``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power spectral density estimates of X and Y, and `Pxy` is the cross spectral density estimate of X and Y. Parameters ---------- x : array_like Time series of measurement values y : array_like Time series of measurement values fs : float, optional Sampling frequency of the `x` and `y` time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap: int, optional Number of points to overlap between segments. If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. axis : int, optional Axis along which the coherence is computed for both inputs; the default is over the last axis (i.e. ``axis=-1``). Returns ------- f : ndarray Array of sample frequencies. Cxy : ndarray Magnitude squared coherence of x and y. 
See Also -------- periodogram: Simple, optionally modified periodogram lombscargle: Lomb-Scargle periodogram for unevenly sampled data welch: Power spectral density by Welch's method. csd: Cross spectral density by Welch's method. Notes ----- An appropriate amount of overlap will depend on the choice of window and on your requirements. For the default Hann window an overlap of 50% is a reasonable trade off between accurately estimating the signal power, while not over counting any of the data. Narrower windows may require a larger overlap. .. versionadded:: 0.16.0 References ---------- .. [1] P. Welch, "The use of the fast Fourier transform for the estimation of power spectra: A method based on time averaging over short, modified periodograms", IEEE Trans. Audio Electroacoust. vol. 15, pp. 70-73, 1967. .. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals" Prentice Hall, 2005 Examples -------- >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() Generate two test signals with some common features. >>> fs = 10e3 >>> N = 1e5 >>> amp = 20 >>> freq = 1234.0 >>> noise_power = 0.001 * fs / 2 >>> time = np.arange(N) / fs >>> b, a = signal.butter(2, 0.25, 'low') >>> x = rng.normal(scale=np.sqrt(noise_power), size=time.shape) >>> y = signal.lfilter(b, a, x) >>> x += amp*np.sin(2*np.pi*freq*time) >>> y += rng.normal(scale=0.1*np.sqrt(noise_power), size=time.shape) Compute and plot the coherence. >>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024) >>> plt.semilogy(f, Cxy) >>> plt.xlabel('frequency [Hz]') >>> plt.ylabel('Coherence') >>> plt.show() """ freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis) _, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis) _, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis) Cxy = np.abs(Pxy)**2 / Pxx / Pyy return freqs, Cxy def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, mode='psd', boundary=None, padded=False): """Calculate various forms of windowed FFTs for PSD, CSD, etc. This is a helper function that implements the commonality between the stft, psd, csd, and spectrogram functions. It is not designed to be called externally. The windows are not averaged over; the result from each window is returned. Parameters ---------- x : array_like Array or sequence containing the data to be analyzed. y : array_like Array or sequence containing the data to be analyzed. If this is the same object in memory as `x` (i.e. ``_spectral_helper(x, x, ...)``), the extra computations are spared. fs : float, optional Sampling frequency of the time series. Defaults to 1.0. window : str or tuple or array_like, optional Desired window to use. If `window` is a string or tuple, it is passed to `get_window` to generate the window values, which are DFT-even by default. See `get_window` for a list of windows and required parameters. If `window` is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window. nperseg : int, optional Length of each segment. Defaults to None, but if window is str or tuple, is set to 256, and if window is array_like, is set to the length of the window. noverlap : int, optional Number of points to overlap between segments. 
If `None`, ``noverlap = nperseg // 2``. Defaults to `None`. nfft : int, optional Length of the FFT used, if a zero padded FFT is desired. If `None`, the FFT length is `nperseg`. Defaults to `None`. detrend : str or function or `False`, optional Specifies how to detrend each segment. If `detrend` is a string, it is passed as the `type` argument to the `detrend` function. If it is a function, it takes a segment and returns a detrended segment. If `detrend` is `False`, no detrending is done. Defaults to 'constant'. return_onesided : bool, optional If `True`, return a one-sided spectrum for real data. If `False` return a two-sided spectrum. Defaults to `True`, but for complex data, a two-sided spectrum is always returned. scaling : { 'density', 'spectrum' }, optional Selects between computing the cross spectral density ('density') where `Pxy` has units of V**2/Hz and computing the cross spectrum ('spectrum') where `Pxy` has units of V**2, if `x` and `y` are measured in V and `fs` is measured in Hz. Defaults to 'density' axis : int, optional Axis along which the FFTs are computed; the default is over the last axis (i.e. ``axis=-1``). mode: str {'psd', 'stft'}, optional Defines what kind of return values are expected. Defaults to 'psd'. boundary : str or None, optional Specifies whether the input signal is extended at both ends, and how to generate the new values, in order to center the first windowed segment on the first input point. This has the benefit of enabling reconstruction of the first input point when the employed window function starts at zero. Valid options are ``['even', 'odd', 'constant', 'zeros', None]``. Defaults to `None`. padded : bool, optional Specifies whether the input signal is zero-padded at the end to make the signal fit exactly into an integer number of window segments, so that all of the signal is included in the output. Defaults to `False`. Padding occurs after boundary extension, if `boundary` is not `None`, and `padded` is `True`. Returns ------- freqs : ndarray Array of sample frequencies. t : ndarray Array of times corresponding to each data segment result : ndarray Array of output data, contents dependent on *mode* kwarg. Notes ----- Adapted from matplotlib.mlab .. versionadded:: 0.16.0 """ if mode not in ['psd', 'stft']: raise ValueError("Unknown value for mode %s, must be one of: " "{'psd', 'stft'}" % mode) boundary_funcs = {'even': even_ext, 'odd': odd_ext, 'constant': const_ext, 'zeros': zero_ext, None: None} if boundary not in boundary_funcs: raise ValueError("Unknown boundary option '{}', must be one of: {}" .format(boundary, list(boundary_funcs.keys()))) # If x and y are the same object we can save ourselves some computation. 
same_data = y is x if not same_data and mode != 'psd': raise ValueError("x and y must be equal if mode is 'stft'") axis = int(axis) # Ensure we have np.arrays, get outdtype x = np.asarray(x) if not same_data: y = np.asarray(y) outdtype = np.result_type(x, y, np.complex64) else: outdtype = np.result_type(x, np.complex64) if not same_data: # Check if we can broadcast the outer axes together xouter = list(x.shape) youter = list(y.shape) xouter.pop(axis) youter.pop(axis) try: outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape except ValueError as e: raise ValueError('x and y cannot be broadcast together.') from e if same_data: if x.size == 0: return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) else: if x.size == 0 or y.size == 0: outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) emptyout = np.moveaxis(np.empty(outshape), -1, axis) return emptyout, emptyout, emptyout if x.ndim > 1: if axis != -1: x = np.moveaxis(x, axis, -1) if not same_data and y.ndim > 1: y = np.moveaxis(y, axis, -1) # Check if x and y are the same length, zero-pad if necessary if not same_data: if x.shape[-1] != y.shape[-1]: if x.shape[-1] < y.shape[-1]: pad_shape = list(x.shape) pad_shape[-1] = y.shape[-1] - x.shape[-1] x = np.concatenate((x, np.zeros(pad_shape)), -1) else: pad_shape = list(y.shape) pad_shape[-1] = x.shape[-1] - y.shape[-1] y = np.concatenate((y, np.zeros(pad_shape)), -1) if nperseg is not None: # if specified by user nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') # parse window; if array like, then set nperseg = win.shape win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1]) if nfft is None: nfft = nperseg elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg//2 else: noverlap = int(noverlap) if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') nstep = nperseg - noverlap # Padding occurs after boundary extension, so that the extended signal ends # in zeros, instead of introducing an impulse at the end. # I.e. if x = [..., 3, 2] # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] if boundary is not None: ext_func = boundary_funcs[boundary] x = ext_func(x, nperseg//2, axis=-1) if not same_data: y = ext_func(y, nperseg//2, axis=-1) if padded: # Pad to integer number of windowed segments # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg zeros_shape = list(x.shape[:-1]) + [nadd] x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1) if not same_data: zeros_shape = list(y.shape[:-1]) + [nadd] y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1) # Handle detrending and window functions if not detrend: def detrend_func(d): return d elif not hasattr(detrend, '__call__'): def detrend_func(d): return _signaltools.detrend(d, type=detrend, axis=-1) elif axis != -1: # Wrap this function so that it receives a shape that it could # reasonably expect to receive. 
def detrend_func(d): d = np.moveaxis(d, -1, axis) d = detrend(d) return np.moveaxis(d, axis, -1) else: detrend_func = detrend if np.result_type(win, np.complex64) != outdtype: win = win.astype(outdtype) if scaling == 'density': scale = 1.0 / (fs * (win*win).sum()) elif scaling == 'spectrum': scale = 1.0 / win.sum()**2 else: raise ValueError('Unknown scaling: %r' % scaling) if mode == 'stft': scale = np.sqrt(scale) if return_onesided: if np.iscomplexobj(x): sides = 'twosided' warnings.warn('Input data is complex, switching to ' 'return_onesided=False') else: sides = 'onesided' if not same_data: if np.iscomplexobj(y): sides = 'twosided' warnings.warn('Input data is complex, switching to ' 'return_onesided=False') else: sides = 'twosided' if sides == 'twosided': freqs = sp_fft.fftfreq(nfft, 1/fs) elif sides == 'onesided': freqs = sp_fft.rfftfreq(nfft, 1/fs) # Perform the windowed FFTs result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides) if not same_data: # All the same operations on the y data result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft, sides) result = np.conjugate(result) * result_y elif mode == 'psd': result = np.conjugate(result) * result result *= scale if sides == 'onesided' and mode == 'psd': if nfft % 2: result[..., 1:] *= 2 else: # Last point is unpaired Nyquist freq point, don't double result[..., 1:-1] *= 2 time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs) if boundary is not None: time -= (nperseg/2) / fs result = result.astype(outdtype) # All imaginary parts are zero anyways if same_data and mode != 'stft': result = result.real # Output is going to have new last axis for time/window index, so a # negative axis index shifts down one if axis < 0: axis -= 1 # Roll frequency axis back to axis where the data came from result = np.moveaxis(result, -1, axis) return freqs, time, result def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides): """ Calculate windowed FFT, for internal use by `scipy.signal._spectral_helper`. This is a helper function that does the main FFT calculation for `_spectral helper`. All input validation is performed there, and the data axis is assumed to be the last axis of x. It is not designed to be called externally. The windows are not averaged over; the result from each window is returned. Returns ------- result : ndarray Array of FFT data Notes ----- Adapted from matplotlib.mlab .. versionadded:: 0.16.0 """ # Created strided array of data segments if nperseg == 1 and noverlap == 0: result = x[..., np.newaxis] else: # https://stackoverflow.com/a/5568169 step = nperseg - noverlap shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg) strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1]) result = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) # Detrend each data segment individually result = detrend_func(result) # Apply window by multiplication result = win * result # Perform the fft. Acts on last axis by default. Zero-pads automatically if sides == 'twosided': func = sp_fft.fft else: result = result.real func = sp_fft.rfft result = func(result, n=nfft) return result def _triage_segments(window, nperseg, input_length): """ Parses window and nperseg arguments for spectrogram and _spectral_helper. This is a helper function, not meant to be called externally. 
Parameters ---------- window : string, tuple, or ndarray If window is specified by a string or tuple and nperseg is not specified, nperseg is set to the default of 256 and returns a window of that length. If instead the window is array_like and nperseg is not specified, then nperseg is set to the length of the window. A ValueError is raised if the user supplies both an array_like window and a value for nperseg but nperseg does not equal the length of the window. nperseg : int Length of each segment. input_length : int Length of input signal, i.e. x.shape[-1]. Used to test for errors. Returns ------- win : ndarray The window. If the function was called with a string or tuple, then this will hold the actual array used as the window. nperseg : int Length of each segment. If window is str or tuple, nperseg is set to 256. If window is array_like, nperseg is set to the length of the window. """ # parse window; if array like, then set nperseg = win.shape if isinstance(window, str) or isinstance(window, tuple): # if nperseg not specified if nperseg is None: nperseg = 256 # then change to default if nperseg > input_length: warnings.warn('nperseg = {0:d} is greater than input length ' ' = {1:d}, using nperseg = {1:d}' .format(nperseg, input_length)) nperseg = input_length win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if input_length < win.shape[-1]: raise ValueError('window is longer than input signal') if nperseg is None: nperseg = win.shape[0] elif nperseg is not None: if nperseg != win.shape[0]: raise ValueError("value specified for nperseg is different" " from length of window") return win, nperseg def _median_bias(n): """ Returns the bias of the median of a set of periodograms relative to the mean. See Appendix B from [1]_ for details. Parameters ---------- n : int Number of periodograms being averaged. Returns ------- bias : float Calculated bias. References ---------- .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton. "FINDCHIRP: an algorithm for detection of gravitational waves from inspiraling compact binaries", Physical Review D 85, 2012, :arxiv:`gr-qc/0509116` """ ii_2 = 2 * np.arange(1., (n-1) // 2 + 1) return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
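To make the NOLA discussion in the `istft` docstring above concrete, here is a minimal round-trip sketch (not part of the module; the signal length, window and overlap are arbitrary illustrative choices) showing that an unmodified STFT taken with a NOLA-satisfying window inverts back to the original signal:

import numpy as np
from scipy.signal import stft, istft, check_NOLA

fs = 1024
rng = np.random.default_rng(0)
x = rng.standard_normal(8 * fs)             # arbitrary test signal

nperseg, noverlap = 256, 128                # Hann window at 50% overlap satisfies NOLA
assert check_NOLA('hann', nperseg, noverlap)

_, _, Zxx = stft(x, fs=fs, nperseg=nperseg, noverlap=noverlap)
_, xrec = istft(Zxx, fs=fs, nperseg=nperseg, noverlap=noverlap)

# With an unmodified STFT the reconstruction matches the input samples.
np.testing.assert_allclose(xrec[:x.size], x, atol=1e-10)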
77,910
36.242352
80
py
scipy
scipy-main/scipy/signal/_arraytools.py
""" Functions for acting on a axis of an array. """ import numpy as np def axis_slice(a, start=None, stop=None, step=None, axis=-1): """Take a slice along axis 'axis' from 'a'. Parameters ---------- a : numpy.ndarray The array to be sliced. start, stop, step : int or None The slice parameters. axis : int, optional The axis of `a` to be sliced. Examples -------- >>> a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> axis_slice(a, start=0, stop=1, axis=1) array([[1], [4], [7]]) >>> axis_slice(a, start=1, axis=0) array([[4, 5, 6], [7, 8, 9]]) Notes ----- The keyword arguments start, stop and step are used by calling slice(start, stop, step). This implies axis_slice() does not handle its arguments the exactly the same as indexing. To select a single index k, for example, use axis_slice(a, start=k, stop=k+1) In this case, the length of the axis 'axis' in the result will be 1; the trivial dimension is not removed. (Use numpy.squeeze() to remove trivial axes.) """ a_slice = [slice(None)] * a.ndim a_slice[axis] = slice(start, stop, step) b = a[tuple(a_slice)] return b def axis_reverse(a, axis=-1): """Reverse the 1-D slices of `a` along axis `axis`. Returns axis_slice(a, step=-1, axis=axis). """ return axis_slice(a, step=-1, axis=axis) def odd_ext(x, n, axis=-1): """ Odd extension at the boundaries of an array Generate a new ndarray by making an odd extension of `x` along an axis. Parameters ---------- x : ndarray The array to be extended. n : int The number of elements by which to extend `x` at each end of the axis. axis : int, optional The axis along which to extend `x`. Default is -1. Examples -------- >>> from scipy.signal._arraytools import odd_ext >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) >>> odd_ext(a, 2) array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], [-4, -1, 0, 1, 4, 9, 16, 23, 28]]) Odd extension is a "180 degree rotation" at the endpoints of the original array: >>> t = np.linspace(0, 1.5, 100) >>> a = 0.9 * np.sin(2 * np.pi * t**2) >>> b = odd_ext(a, 40) >>> import matplotlib.pyplot as plt >>> plt.plot(arange(-40, 140), b, 'b', lw=1, label='odd extension') >>> plt.plot(arange(100), a, 'r', lw=2, label='original') >>> plt.legend(loc='best') >>> plt.show() """ if n < 1: return x if n > x.shape[axis] - 1: raise ValueError(("The extension length n (%d) is too big. " + "It must not exceed x.shape[axis]-1, which is %d.") % (n, x.shape[axis] - 1)) left_end = axis_slice(x, start=0, stop=1, axis=axis) left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) right_end = axis_slice(x, start=-1, axis=axis) right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) ext = np.concatenate((2 * left_end - left_ext, x, 2 * right_end - right_ext), axis=axis) return ext def even_ext(x, n, axis=-1): """ Even extension at the boundaries of an array Generate a new ndarray by making an even extension of `x` along an axis. Parameters ---------- x : ndarray The array to be extended. n : int The number of elements by which to extend `x` at each end of the axis. axis : int, optional The axis along which to extend `x`. Default is -1. 
Examples -------- >>> from scipy.signal._arraytools import even_ext >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) >>> even_ext(a, 2) array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3], [ 4, 1, 0, 1, 4, 9, 16, 9, 4]]) Even extension is a "mirror image" at the boundaries of the original array: >>> t = np.linspace(0, 1.5, 100) >>> a = 0.9 * np.sin(2 * np.pi * t**2) >>> b = even_ext(a, 40) >>> import matplotlib.pyplot as plt >>> plt.plot(arange(-40, 140), b, 'b', lw=1, label='even extension') >>> plt.plot(arange(100), a, 'r', lw=2, label='original') >>> plt.legend(loc='best') >>> plt.show() """ if n < 1: return x if n > x.shape[axis] - 1: raise ValueError(("The extension length n (%d) is too big. " + "It must not exceed x.shape[axis]-1, which is %d.") % (n, x.shape[axis] - 1)) left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis) right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis) ext = np.concatenate((left_ext, x, right_ext), axis=axis) return ext def const_ext(x, n, axis=-1): """ Constant extension at the boundaries of an array Generate a new ndarray that is a constant extension of `x` along an axis. The extension repeats the values at the first and last element of the axis. Parameters ---------- x : ndarray The array to be extended. n : int The number of elements by which to extend `x` at each end of the axis. axis : int, optional The axis along which to extend `x`. Default is -1. Examples -------- >>> from scipy.signal._arraytools import const_ext >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) >>> const_ext(a, 2) array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5], [ 0, 0, 0, 1, 4, 9, 16, 16, 16]]) Constant extension continues with the same values as the endpoints of the array: >>> t = np.linspace(0, 1.5, 100) >>> a = 0.9 * np.sin(2 * np.pi * t**2) >>> b = const_ext(a, 40) >>> import matplotlib.pyplot as plt >>> plt.plot(arange(-40, 140), b, 'b', lw=1, label='constant extension') >>> plt.plot(arange(100), a, 'r', lw=2, label='original') >>> plt.legend(loc='best') >>> plt.show() """ if n < 1: return x left_end = axis_slice(x, start=0, stop=1, axis=axis) ones_shape = [1] * x.ndim ones_shape[axis] = n ones = np.ones(ones_shape, dtype=x.dtype) left_ext = ones * left_end right_end = axis_slice(x, start=-1, axis=axis) right_ext = ones * right_end ext = np.concatenate((left_ext, x, right_ext), axis=axis) return ext def zero_ext(x, n, axis=-1): """ Zero padding at the boundaries of an array Generate a new ndarray that is a zero-padded extension of `x` along an axis. Parameters ---------- x : ndarray The array to be extended. n : int The number of elements by which to extend `x` at each end of the axis. axis : int, optional The axis along which to extend `x`. Default is -1. Examples -------- >>> from scipy.signal._arraytools import zero_ext >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]]) >>> zero_ext(a, 2) array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0], [ 0, 0, 0, 1, 4, 9, 16, 0, 0]]) """ if n < 1: return x zeros_shape = list(x.shape) zeros_shape[axis] = n zeros = np.zeros(zeros_shape, dtype=x.dtype) ext = np.concatenate((zeros, x, zeros), axis=axis) return ext
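For quick comparison, a small illustrative sketch of what the four extension helpers above produce on the same 1-D input with ``n=2``; the values follow directly from the definitions:

import numpy as np
from scipy.signal._arraytools import odd_ext, even_ext, const_ext, zero_ext

a = np.array([1, 2, 3, 4, 5])
print(odd_ext(a, 2))    # [-1  0  1  2  3  4  5  6  7]  point reflection about the endpoints
print(even_ext(a, 2))   # [ 3  2  1  2  3  4  5  4  3]  mirror image at the endpoints
print(const_ext(a, 2))  # [ 1  1  1  2  3  4  5  5  5]  repeat the endpoint values
print(zero_ext(a, 2))   # [ 0  0  1  2  3  4  5  0  0]  pad with zeros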
7,489
29.950413
79
py
scipy
scipy-main/scipy/signal/_savitzky_golay.py
import numpy as np from scipy.linalg import lstsq from scipy._lib._util import float_factorial from scipy.ndimage import convolve1d from ._arraytools import axis_slice def savgol_coeffs(window_length, polyorder, deriv=0, delta=1.0, pos=None, use="conv"): """Compute the coefficients for a 1-D Savitzky-Golay FIR filter. Parameters ---------- window_length : int The length of the filter window (i.e., the number of coefficients). polyorder : int The order of the polynomial used to fit the samples. `polyorder` must be less than `window_length`. deriv : int, optional The order of the derivative to compute. This must be a nonnegative integer. The default is 0, which means to filter the data without differentiating. delta : float, optional The spacing of the samples to which the filter will be applied. This is only used if deriv > 0. pos : int or None, optional If pos is not None, it specifies evaluation position within the window. The default is the middle of the window. use : str, optional Either 'conv' or 'dot'. This argument chooses the order of the coefficients. The default is 'conv', which means that the coefficients are ordered to be used in a convolution. With use='dot', the order is reversed, so the filter is applied by dotting the coefficients with the data set. Returns ------- coeffs : 1-D ndarray The filter coefficients. See Also -------- savgol_filter Notes ----- .. versionadded:: 0.14.0 References ---------- A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of Data by Simplified Least Squares Procedures. Analytical Chemistry, 1964, 36 (8), pp 1627-1639. Jianwen Luo, Kui Ying, and Jing Bai. 2005. Savitzky-Golay smoothing and differentiation filter for even number data. Signal Process. 85, 7 (July 2005), 1429-1434. Examples -------- >>> import numpy as np >>> from scipy.signal import savgol_coeffs >>> savgol_coeffs(5, 2) array([-0.08571429, 0.34285714, 0.48571429, 0.34285714, -0.08571429]) >>> savgol_coeffs(5, 2, deriv=1) array([ 2.00000000e-01, 1.00000000e-01, 2.07548111e-16, -1.00000000e-01, -2.00000000e-01]) Note that use='dot' simply reverses the coefficients. >>> savgol_coeffs(5, 2, pos=3) array([ 0.25714286, 0.37142857, 0.34285714, 0.17142857, -0.14285714]) >>> savgol_coeffs(5, 2, pos=3, use='dot') array([-0.14285714, 0.17142857, 0.34285714, 0.37142857, 0.25714286]) >>> savgol_coeffs(4, 2, pos=3, deriv=1, use='dot') array([0.45, -0.85, -0.65, 1.05]) `x` contains data from the parabola x = t**2, sampled at t = -1, 0, 1, 2, 3. `c` holds the coefficients that will compute the derivative at the last position. When dotted with `x` the result should be 6. >>> x = np.array([1, 0, 1, 4, 9]) >>> c = savgol_coeffs(5, 2, pos=4, deriv=1, use='dot') >>> c.dot(x) 6.0 """ # An alternative method for finding the coefficients when deriv=0 is # t = np.arange(window_length) # unit = (t == pos).astype(int) # coeffs = np.polyval(np.polyfit(t, unit, polyorder), t) # The method implemented here is faster. 
# To recreate the table of sample coefficients shown in the chapter on # the Savitzy-Golay filter in the Numerical Recipes book, use # window_length = nL + nR + 1 # pos = nL + 1 # c = savgol_coeffs(window_length, M, pos=pos, use='dot') if polyorder >= window_length: raise ValueError("polyorder must be less than window_length.") halflen, rem = divmod(window_length, 2) if pos is None: if rem == 0: pos = halflen - 0.5 else: pos = halflen if not (0 <= pos < window_length): raise ValueError("pos must be nonnegative and less than " "window_length.") if use not in ['conv', 'dot']: raise ValueError("`use` must be 'conv' or 'dot'") if deriv > polyorder: coeffs = np.zeros(window_length) return coeffs # Form the design matrix A. The columns of A are powers of the integers # from -pos to window_length - pos - 1. The powers (i.e., rows) range # from 0 to polyorder. (That is, A is a vandermonde matrix, but not # necessarily square.) x = np.arange(-pos, window_length - pos, dtype=float) if use == "conv": # Reverse so that result can be used in a convolution. x = x[::-1] order = np.arange(polyorder + 1).reshape(-1, 1) A = x ** order # y determines which order derivative is returned. y = np.zeros(polyorder + 1) # The coefficient assigned to y[deriv] scales the result to take into # account the order of the derivative and the sample spacing. y[deriv] = float_factorial(deriv) / (delta ** deriv) # Find the least-squares solution of A*c = y coeffs, _, _, _ = lstsq(A, y) return coeffs def _polyder(p, m): """Differentiate polynomials represented with coefficients. p must be a 1-D or 2-D array. In the 2-D case, each column gives the coefficients of a polynomial; the first row holds the coefficients associated with the highest power. m must be a nonnegative integer. (numpy.polyder doesn't handle the 2-D case.) """ if m == 0: result = p else: n = len(p) if n <= m: result = np.zeros_like(p[:1, ...]) else: dp = p[:-m].copy() for k in range(m): rng = np.arange(n - k - 1, m - k - 1, -1) dp *= rng.reshape((n - m,) + (1,) * (p.ndim - 1)) result = dp return result def _fit_edge(x, window_start, window_stop, interp_start, interp_stop, axis, polyorder, deriv, delta, y): """ Given an N-d array `x` and the specification of a slice of `x` from `window_start` to `window_stop` along `axis`, create an interpolating polynomial of each 1-D slice, and evaluate that polynomial in the slice from `interp_start` to `interp_stop`. Put the result into the corresponding slice of `y`. """ # Get the edge into a (window_length, -1) array. x_edge = axis_slice(x, start=window_start, stop=window_stop, axis=axis) if axis == 0 or axis == -x.ndim: xx_edge = x_edge swapped = False else: xx_edge = x_edge.swapaxes(axis, 0) swapped = True xx_edge = xx_edge.reshape(xx_edge.shape[0], -1) # Fit the edges. poly_coeffs has shape (polyorder + 1, -1), # where '-1' is the same as in xx_edge. poly_coeffs = np.polyfit(np.arange(0, window_stop - window_start), xx_edge, polyorder) if deriv > 0: poly_coeffs = _polyder(poly_coeffs, deriv) # Compute the interpolated values for the edge. i = np.arange(interp_start - window_start, interp_stop - window_start) values = np.polyval(poly_coeffs, i.reshape(-1, 1)) / (delta ** deriv) # Now put the values into the appropriate slice of y. # First reshape values to match y. shp = list(y.shape) shp[0], shp[axis] = shp[axis], shp[0] values = values.reshape(interp_stop - interp_start, *shp[1:]) if swapped: values = values.swapaxes(0, axis) # Get a view of the data to be replaced by values. 
y_edge = axis_slice(y, start=interp_start, stop=interp_stop, axis=axis) y_edge[...] = values def _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y): """ Use polynomial interpolation of x at the low and high ends of the axis to fill in the halflen values in y. This function just calls _fit_edge twice, once for each end of the axis. """ halflen = window_length // 2 _fit_edge(x, 0, window_length, 0, halflen, axis, polyorder, deriv, delta, y) n = x.shape[axis] _fit_edge(x, n - window_length, n, n - halflen, n, axis, polyorder, deriv, delta, y) def savgol_filter(x, window_length, polyorder, deriv=0, delta=1.0, axis=-1, mode='interp', cval=0.0): """ Apply a Savitzky-Golay filter to an array. This is a 1-D filter. If `x` has dimension greater than 1, `axis` determines the axis along which the filter is applied. Parameters ---------- x : array_like The data to be filtered. If `x` is not a single or double precision floating point array, it will be converted to type ``numpy.float64`` before filtering. window_length : int The length of the filter window (i.e., the number of coefficients). If `mode` is 'interp', `window_length` must be less than or equal to the size of `x`. polyorder : int The order of the polynomial used to fit the samples. `polyorder` must be less than `window_length`. deriv : int, optional The order of the derivative to compute. This must be a nonnegative integer. The default is 0, which means to filter the data without differentiating. delta : float, optional The spacing of the samples to which the filter will be applied. This is only used if deriv > 0. Default is 1.0. axis : int, optional The axis of the array `x` along which the filter is to be applied. Default is -1. mode : str, optional Must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'. This determines the type of extension to use for the padded signal to which the filter is applied. When `mode` is 'constant', the padding value is given by `cval`. See the Notes for more details on 'mirror', 'constant', 'wrap', and 'nearest'. When the 'interp' mode is selected (the default), no extension is used. Instead, a degree `polyorder` polynomial is fit to the last `window_length` values of the edges, and this polynomial is used to evaluate the last `window_length // 2` output values. cval : scalar, optional Value to fill past the edges of the input if `mode` is 'constant'. Default is 0.0. Returns ------- y : ndarray, same shape as `x` The filtered data. See Also -------- savgol_coeffs Notes ----- Details on the `mode` options: 'mirror': Repeats the values at the edges in reverse order. The value closest to the edge is not included. 'nearest': The extension contains the nearest input value. 'constant': The extension contains the value given by the `cval` argument. 'wrap': The extension contains the values from the other end of the array. For example, if the input is [1, 2, 3, 4, 5, 6, 7, 8], and `window_length` is 7, the following shows the extended data for the various `mode` options (assuming `cval` is 0):: mode | Ext | Input | Ext -----------+---------+------------------------+--------- 'mirror' | 4 3 2 | 1 2 3 4 5 6 7 8 | 7 6 5 'nearest' | 1 1 1 | 1 2 3 4 5 6 7 8 | 8 8 8 'constant' | 0 0 0 | 1 2 3 4 5 6 7 8 | 0 0 0 'wrap' | 6 7 8 | 1 2 3 4 5 6 7 8 | 1 2 3 .. versionadded:: 0.14.0 Examples -------- >>> import numpy as np >>> from scipy.signal import savgol_filter >>> np.set_printoptions(precision=2) # For compact display. 
>>> x = np.array([2, 2, 5, 2, 1, 0, 1, 4, 9]) Filter with a window length of 5 and a degree 2 polynomial. Use the defaults for all other parameters. >>> savgol_filter(x, 5, 2) array([1.66, 3.17, 3.54, 2.86, 0.66, 0.17, 1. , 4. , 9. ]) Note that the last five values in x are samples of a parabola, so when mode='interp' (the default) is used with polyorder=2, the last three values are unchanged. Compare that to, for example, `mode='nearest'`: >>> savgol_filter(x, 5, 2, mode='nearest') array([1.74, 3.03, 3.54, 2.86, 0.66, 0.17, 1. , 4.6 , 7.97]) """ if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]: raise ValueError("mode must be 'mirror', 'constant', 'nearest' " "'wrap' or 'interp'.") x = np.asarray(x) # Ensure that x is either single or double precision floating point. if x.dtype != np.float64 and x.dtype != np.float32: x = x.astype(np.float64) coeffs = savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta) if mode == "interp": if window_length > x.shape[axis]: raise ValueError("If mode is 'interp', window_length must be less " "than or equal to the size of x.") # Do not pad. Instead, for the elements within `window_length // 2` # of the ends of the sequence, use the polynomial that is fitted to # the last `window_length` elements. y = convolve1d(x, coeffs, axis=axis, mode="constant") _fit_edges_polyfit(x, window_length, polyorder, deriv, delta, axis, y) else: # Any mode other than 'interp' is passed on to ndimage.convolve1d. y = convolve1d(x, coeffs, axis=axis, mode=mode, cval=cval) return y
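As a sketch of the coefficient/filter relationship documented above (the data and window settings are arbitrary): for an interior sample, the `savgol_filter` output equals the ``use='dot'`` coefficients dotted with the surrounding window of data.

import numpy as np
from scipy.signal import savgol_coeffs, savgol_filter

rng = np.random.default_rng(1)
x = np.cumsum(rng.standard_normal(50))      # arbitrary data

window_length, polyorder = 7, 3
y = savgol_filter(x, window_length, polyorder)

c = savgol_coeffs(window_length, polyorder, use='dot')
i, half = 20, window_length // 2            # an interior sample, away from both edges
np.testing.assert_allclose(y[i], c.dot(x[i - half:i + half + 1]))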
13,417
36.480447
79
py
scipy
scipy-main/scipy/signal/_czt.py
# This program is public domain # Authors: Paul Kienzle, Nadav Horesh """ Chirp z-transform. We provide two interfaces to the chirp z-transform: an object interface which precalculates part of the transform and can be applied efficiently to many different data sets, and a functional interface which is applied only to the given data set. Transforms ---------- CZT : callable (x, axis=-1) -> array Define a chirp z-transform that can be applied to different signals. ZoomFFT : callable (x, axis=-1) -> array Define a Fourier transform on a range of frequencies. Functions --------- czt : array Compute the chirp z-transform for a signal. zoom_fft : array Compute the Fourier transform on a range of frequencies. """ import cmath import numbers import numpy as np from numpy import pi, arange from scipy.fft import fft, ifft, next_fast_len __all__ = ['czt', 'zoom_fft', 'CZT', 'ZoomFFT', 'czt_points'] def _validate_sizes(n, m): if n < 1 or not isinstance(n, numbers.Integral): raise ValueError('Invalid number of CZT data ' f'points ({n}) specified. ' 'n must be positive and integer type.') if m is None: m = n elif m < 1 or not isinstance(m, numbers.Integral): raise ValueError('Invalid number of CZT output ' f'points ({m}) specified. ' 'm must be positive and integer type.') return m def czt_points(m, w=None, a=1+0j): """ Return the points at which the chirp z-transform is computed. Parameters ---------- m : int The number of points desired. w : complex, optional The ratio between points in each step. Defaults to equally spaced points around the entire unit circle. a : complex, optional The starting point in the complex plane. Default is 1+0j. Returns ------- out : ndarray The points in the Z plane at which `CZT` samples the z-transform, when called with arguments `m`, `w`, and `a`, as complex numbers. See Also -------- CZT : Class that creates a callable chirp z-transform function. czt : Convenience function for quickly calculating CZT. Examples -------- Plot the points of a 16-point FFT: >>> import numpy as np >>> from scipy.signal import czt_points >>> points = czt_points(16) >>> import matplotlib.pyplot as plt >>> plt.plot(points.real, points.imag, 'o') >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) >>> plt.axis('equal') >>> plt.show() and a 91-point logarithmic spiral that crosses the unit circle: >>> m, w, a = 91, 0.995*np.exp(-1j*np.pi*.05), 0.8*np.exp(1j*np.pi/6) >>> points = czt_points(m, w, a) >>> plt.plot(points.real, points.imag, 'o') >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) >>> plt.axis('equal') >>> plt.show() """ m = _validate_sizes(1, m) k = arange(m) a = 1.0 * a # at least float if w is None: # Nothing specified, default to FFT return a * np.exp(2j * pi * k / m) else: # w specified w = 1.0 * w # at least float return a * w**-k class CZT: """ Create a callable chirp z-transform function. Transform to compute the frequency response around a spiral. Objects of this class are callables which can compute the chirp z-transform on their inputs. This object precalculates the constant chirps used in the given transform. Parameters ---------- n : int The size of the signal. m : int, optional The number of output points desired. Default is `n`. w : complex, optional The ratio between points in each step. This must be precise or the accumulated error will degrade the tail of the output sequence. Defaults to equally spaced points around the entire unit circle. a : complex, optional The starting point in the complex plane. Default is 1+0j. 
Returns ------- f : CZT Callable object ``f(x, axis=-1)`` for computing the chirp z-transform on `x`. See Also -------- czt : Convenience function for quickly calculating CZT. ZoomFFT : Class that creates a callable partial FFT function. Notes ----- The defaults are chosen such that ``f(x)`` is equivalent to ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to ``fft.fft(x, m)``. If `w` does not lie on the unit circle, then the transform will be around a spiral with exponentially-increasing radius. Regardless, angle will increase linearly. For transforms that do lie on the unit circle, accuracy is better when using `ZoomFFT`, since any numerical error in `w` is accumulated for long data lengths, drifting away from the unit circle. The chirp z-transform can be faster than an equivalent FFT with zero padding. Try it with your own array sizes to see. However, the chirp z-transform is considerably less precise than the equivalent zero-padded FFT. As this CZT is implemented using the Bluestein algorithm, it can compute large prime-length Fourier transforms in O(N log N) time, rather than the O(N**2) time required by the direct DFT calculation. (`scipy.fft` also uses Bluestein's algorithm'.) (The name "chirp z-transform" comes from the use of a chirp in the Bluestein algorithm. It does not decompose signals into chirps, like other transforms with "chirp" in the name.) References ---------- .. [1] Leo I. Bluestein, "A linear filtering approach to the computation of the discrete Fourier transform," Northeast Electronics Research and Engineering Meeting Record 10, 218-219 (1968). .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and its application," Bell Syst. Tech. J. 48, 1249-1292 (1969). Examples -------- Compute multiple prime-length FFTs: >>> from scipy.signal import CZT >>> import numpy as np >>> a = np.random.rand(7) >>> b = np.random.rand(7) >>> c = np.random.rand(7) >>> czt_7 = CZT(n=7) >>> A = czt_7(a) >>> B = czt_7(b) >>> C = czt_7(c) Display the points at which the FFT is calculated: >>> czt_7.points() array([ 1.00000000+0.j , 0.62348980+0.78183148j, -0.22252093+0.97492791j, -0.90096887+0.43388374j, -0.90096887-0.43388374j, -0.22252093-0.97492791j, 0.62348980-0.78183148j]) >>> import matplotlib.pyplot as plt >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o') >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) >>> plt.axis('equal') >>> plt.show() """ def __init__(self, n, m=None, w=None, a=1+0j): m = _validate_sizes(n, m) k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) if w is None: # Nothing specified, default to FFT-like w = cmath.exp(-2j*pi/m) wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m) else: # w specified wk2 = w**(k**2/2.) a = 1.0 * a # at least float self.w, self.a = w, a self.m, self.n = m, n nfft = next_fast_len(n + m - 1) self._Awk2 = a**-k[:n] * wk2[:n] self._nfft = nfft self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) self._wk2 = wk2[:m] self._yidx = slice(n-1, n+m-1) def __call__(self, x, *, axis=-1): """ Calculate the chirp z-transform of a signal. Parameters ---------- x : array The signal to transform. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. Returns ------- out : ndarray An array of the same dimensions as `x`, but with the length of the transformed axis set to `m`. 
""" x = np.asarray(x) if x.shape[axis] != self.n: raise ValueError(f"CZT defined for length {self.n}, not " f"{x.shape[axis]}") # Calculate transpose coordinates, to allow operation on any given axis trnsp = np.arange(x.ndim) trnsp[[axis, -1]] = [-1, axis] x = x.transpose(*trnsp) y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft)) y = y[..., self._yidx] * self._wk2 return y.transpose(*trnsp) def points(self): """ Return the points at which the chirp z-transform is computed. """ return czt_points(self.m, self.w, self.a) class ZoomFFT(CZT): """ Create a callable zoom FFT transform function. This is a specialization of the chirp z-transform (`CZT`) for a set of equally-spaced frequencies around the unit circle, used to calculate a section of the FFT more efficiently than calculating the entire FFT and truncating. Parameters ---------- n : int The size of the signal. fn : array_like A length-2 sequence [`f1`, `f2`] giving the frequency range, or a scalar, for which the range [0, `fn`] is assumed. m : int, optional The number of points to evaluate. Default is `n`. fs : float, optional The sampling frequency. If ``fs=10`` represented 10 kHz, for example, then `f1` and `f2` would also be given in kHz. The default sampling frequency is 2, so `f1` and `f2` should be in the range [0, 1] to keep the transform below the Nyquist frequency. endpoint : bool, optional If True, `f2` is the last sample. Otherwise, it is not included. Default is False. Returns ------- f : ZoomFFT Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`. See Also -------- zoom_fft : Convenience function for calculating a zoom FFT. Notes ----- The defaults are chosen such that ``f(x, 2)`` is equivalent to ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to ``fft.fft(x, m)``. Sampling frequency is 1/dt, the time step between samples in the signal `x`. The unit circle corresponds to frequencies from 0 up to the sampling frequency. The default sampling frequency of 2 means that `f1`, `f2` values up to the Nyquist frequency are in the range [0, 1). For `f1`, `f2` values expressed in radians, a sampling frequency of 2*pi should be used. Remember that a zoom FFT can only interpolate the points of the existing FFT. It cannot help to resolve two separate nearby frequencies. Frequency resolution can only be increased by increasing acquisition time. These functions are implemented using Bluestein's algorithm (as is `scipy.fft`). [2]_ References ---------- .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its applications", pg 29 (1970) https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf .. [2] Leo I. Bluestein, "A linear filtering approach to the computation of the discrete Fourier transform," Northeast Electronics Research and Engineering Meeting Record 10, 218-219 (1968). 
Examples -------- To plot the transform results use something like the following: >>> import numpy as np >>> from scipy.signal import ZoomFFT >>> t = np.linspace(0, 1, 1021) >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) >>> f1, f2 = 5, 27 >>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021) >>> X = transform(x) >>> f = np.linspace(f1, f2, len(x)) >>> import matplotlib.pyplot as plt >>> plt.plot(f, 20*np.log10(np.abs(X))) >>> plt.show() """ def __init__(self, n, fn, m=None, *, fs=2, endpoint=False): m = _validate_sizes(n, m) k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) if np.size(fn) == 2: f1, f2 = fn elif np.size(fn) == 1: f1, f2 = 0.0, fn else: raise ValueError('fn must be a scalar or 2-length sequence') self.f1, self.f2, self.fs = f1, f2, fs if endpoint: scale = ((f2 - f1) * m) / (fs * (m - 1)) else: scale = (f2 - f1) / fs a = cmath.exp(2j * pi * f1/fs) wk2 = np.exp(-(1j * pi * scale * k**2) / m) self.w = cmath.exp(-2j*pi/m * scale) self.a = a self.m, self.n = m, n ak = np.exp(-2j * pi * f1/fs * k[:n]) self._Awk2 = ak * wk2[:n] nfft = next_fast_len(n + m - 1) self._nfft = nfft self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) self._wk2 = wk2[:m] self._yidx = slice(n-1, n+m-1) def czt(x, m=None, w=None, a=1+0j, *, axis=-1): """ Compute the frequency response around a spiral in the Z plane. Parameters ---------- x : array The signal to transform. m : int, optional The number of output points desired. Default is the length of the input data. w : complex, optional The ratio between points in each step. This must be precise or the accumulated error will degrade the tail of the output sequence. Defaults to equally spaced points around the entire unit circle. a : complex, optional The starting point in the complex plane. Default is 1+0j. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. Returns ------- out : ndarray An array of the same dimensions as `x`, but with the length of the transformed axis set to `m`. See Also -------- CZT : Class that creates a callable chirp z-transform function. zoom_fft : Convenience function for partial FFT calculations. Notes ----- The defaults are chosen such that ``signal.czt(x)`` is equivalent to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is equivalent to ``fft.fft(x, m)``. If the transform needs to be repeated, use `CZT` to construct a specialized transform function which can be reused without recomputing constants. An example application is in system identification, repeatedly evaluating small slices of the z-transform of a system, around where a pole is expected to exist, to refine the estimate of the pole's true location. [1]_ References ---------- .. 
[1] Steve Alan Shilling, "A study of the chirp z-transform and its applications", pg 20 (1970) https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf Examples -------- Generate a sinusoid: >>> import numpy as np >>> f1, f2, fs = 8, 10, 200 # Hz >>> t = np.linspace(0, 1, fs, endpoint=False) >>> x = np.sin(2*np.pi*t*f2) >>> import matplotlib.pyplot as plt >>> plt.plot(t, x) >>> plt.axis([0, 1, -1.1, 1.1]) >>> plt.show() Its discrete Fourier transform has all of its energy in a single frequency bin: >>> from scipy.fft import rfft, rfftfreq >>> from scipy.signal import czt, czt_points >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) >>> plt.margins(0, 0.1) >>> plt.show() However, if the sinusoid is logarithmically-decaying: >>> x = np.exp(-t*f1) * np.sin(2*np.pi*t*f2) >>> plt.plot(t, x) >>> plt.axis([0, 1, -1.1, 1.1]) >>> plt.show() the DFT will have spectral leakage: >>> plt.plot(rfftfreq(fs, 1/fs), abs(rfft(x))) >>> plt.margins(0, 0.1) >>> plt.show() While the DFT always samples the z-transform around the unit circle, the chirp z-transform allows us to sample the Z-transform along any logarithmic spiral, such as a circle with radius smaller than unity: >>> M = fs // 2 # Just positive frequencies, like rfft >>> a = np.exp(-f1/fs) # Starting point of the circle, radius < 1 >>> w = np.exp(-1j*np.pi/M) # "Step size" of circle >>> points = czt_points(M + 1, w, a) # M + 1 to include Nyquist >>> plt.plot(points.real, points.imag, '.') >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) >>> plt.axis('equal'); plt.axis([-1.05, 1.05, -0.05, 1.05]) >>> plt.show() With the correct radius, this transforms the decaying sinusoid (and others with the same decay rate) without spectral leakage: >>> z_vals = czt(x, M + 1, w, a) # Include Nyquist for comparison to rfft >>> freqs = np.angle(points)*fs/(2*np.pi) # angle = omega, radius = sigma >>> plt.plot(freqs, abs(z_vals)) >>> plt.margins(0, 0.1) >>> plt.show() """ x = np.asarray(x) transform = CZT(x.shape[axis], m=m, w=w, a=a) return transform(x, axis=axis) def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1): """ Compute the DFT of `x` only for frequencies in range `fn`. Parameters ---------- x : array The signal to transform. fn : array_like A length-2 sequence [`f1`, `f2`] giving the frequency range, or a scalar, for which the range [0, `fn`] is assumed. m : int, optional The number of points to evaluate. The default is the length of `x`. fs : float, optional The sampling frequency. If ``fs=10`` represented 10 kHz, for example, then `f1` and `f2` would also be given in kHz. The default sampling frequency is 2, so `f1` and `f2` should be in the range [0, 1] to keep the transform below the Nyquist frequency. endpoint : bool, optional If True, `f2` is the last sample. Otherwise, it is not included. Default is False. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. Returns ------- out : ndarray The transformed signal. The Fourier transform will be calculated at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m. See Also -------- ZoomFFT : Class that creates a callable partial FFT function. Notes ----- The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is equivalent to ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.zoom_fft(x, 2, m)`` is equivalent to ``fft.fft(x, m)``. 
To graph the magnitude of the resulting transform, use:: plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m))) If the transform needs to be repeated, use `ZoomFFT` to construct a specialized transform function which can be reused without recomputing constants. Examples -------- To plot the transform results use something like the following: >>> import numpy as np >>> from scipy.signal import zoom_fft >>> t = np.linspace(0, 1, 1021) >>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t) >>> f1, f2 = 5, 27 >>> X = zoom_fft(x, [f1, f2], len(x), fs=1021) >>> f = np.linspace(f1, f2, len(x)) >>> import matplotlib.pyplot as plt >>> plt.plot(f, 20*np.log10(np.abs(X))) >>> plt.show() """ x = np.asarray(x) transform = ZoomFFT(x.shape[axis], fn, m=m, fs=fs, endpoint=endpoint) return transform(x, axis=axis)
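A short sketch of the default equivalences stated in the notes above (array length and seed are arbitrary): with default parameters, `czt` and `zoom_fft` both reduce to the ordinary DFT.

import numpy as np
from scipy.fft import fft
from scipy.signal import czt, zoom_fft

x = np.random.default_rng(2).standard_normal(33)    # arbitrary length

# Default w and a sample the full unit circle, i.e. the plain DFT.
np.testing.assert_allclose(czt(x), fft(x), rtol=1e-7, atol=1e-9)

# fn=2 with the default fs=2 spans the full unit circle, again matching the DFT.
np.testing.assert_allclose(zoom_fft(x, 2), fft(x), rtol=1e-7, atol=1e-9)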
19,445
32.760417
89
py
scipy
scipy-main/scipy/signal/tests/test_array_tools.py
import numpy as np from numpy.testing import assert_array_equal from pytest import raises as assert_raises from scipy.signal._arraytools import (axis_slice, axis_reverse, odd_ext, even_ext, const_ext, zero_ext) class TestArrayTools: def test_axis_slice(self): a = np.arange(12).reshape(3, 4) s = axis_slice(a, start=0, stop=1, axis=0) assert_array_equal(s, a[0:1, :]) s = axis_slice(a, start=-1, axis=0) assert_array_equal(s, a[-1:, :]) s = axis_slice(a, start=0, stop=1, axis=1) assert_array_equal(s, a[:, 0:1]) s = axis_slice(a, start=-1, axis=1) assert_array_equal(s, a[:, -1:]) s = axis_slice(a, start=0, step=2, axis=0) assert_array_equal(s, a[::2, :]) s = axis_slice(a, start=0, step=2, axis=1) assert_array_equal(s, a[:, ::2]) def test_axis_reverse(self): a = np.arange(12).reshape(3, 4) r = axis_reverse(a, axis=0) assert_array_equal(r, a[::-1, :]) r = axis_reverse(a, axis=1) assert_array_equal(r, a[:, ::-1]) def test_odd_ext(self): a = np.array([[1, 2, 3, 4, 5], [9, 8, 7, 6, 5]]) odd = odd_ext(a, 2, axis=1) expected = np.array([[-1, 0, 1, 2, 3, 4, 5, 6, 7], [11, 10, 9, 8, 7, 6, 5, 4, 3]]) assert_array_equal(odd, expected) odd = odd_ext(a, 1, axis=0) expected = np.array([[-7, -4, -1, 2, 5], [1, 2, 3, 4, 5], [9, 8, 7, 6, 5], [17, 14, 11, 8, 5]]) assert_array_equal(odd, expected) assert_raises(ValueError, odd_ext, a, 2, axis=0) assert_raises(ValueError, odd_ext, a, 5, axis=1) def test_even_ext(self): a = np.array([[1, 2, 3, 4, 5], [9, 8, 7, 6, 5]]) even = even_ext(a, 2, axis=1) expected = np.array([[3, 2, 1, 2, 3, 4, 5, 4, 3], [7, 8, 9, 8, 7, 6, 5, 6, 7]]) assert_array_equal(even, expected) even = even_ext(a, 1, axis=0) expected = np.array([[9, 8, 7, 6, 5], [1, 2, 3, 4, 5], [9, 8, 7, 6, 5], [1, 2, 3, 4, 5]]) assert_array_equal(even, expected) assert_raises(ValueError, even_ext, a, 2, axis=0) assert_raises(ValueError, even_ext, a, 5, axis=1) def test_const_ext(self): a = np.array([[1, 2, 3, 4, 5], [9, 8, 7, 6, 5]]) const = const_ext(a, 2, axis=1) expected = np.array([[1, 1, 1, 2, 3, 4, 5, 5, 5], [9, 9, 9, 8, 7, 6, 5, 5, 5]]) assert_array_equal(const, expected) const = const_ext(a, 1, axis=0) expected = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [9, 8, 7, 6, 5], [9, 8, 7, 6, 5]]) assert_array_equal(const, expected) def test_zero_ext(self): a = np.array([[1, 2, 3, 4, 5], [9, 8, 7, 6, 5]]) zero = zero_ext(a, 2, axis=1) expected = np.array([[0, 0, 1, 2, 3, 4, 5, 0, 0], [0, 0, 9, 8, 7, 6, 5, 0, 0]]) assert_array_equal(zero, expected) zero = zero_ext(a, 1, axis=0) expected = np.array([[0, 0, 0, 0, 0], [1, 2, 3, 4, 5], [9, 8, 7, 6, 5], [0, 0, 0, 0, 0]]) assert_array_equal(zero, expected)
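Beyond the correctness checks above, a typical use of these extension helpers is to pad a signal before filtering so that the filter does not see an artificial step at the boundary (similar in spirit to the default odd padding in `scipy.signal.filtfilt`). A rough sketch; the filter design and padding length are arbitrary choices:

import numpy as np
from scipy.signal import butter, lfilter
from scipy.signal._arraytools import odd_ext

b, a = butter(4, 0.2)                       # arbitrary low-pass design
t = np.linspace(0, 1, 500)
x = np.sin(2 * np.pi * 5 * t) + 0.5         # signal that does not start at zero

n = 3 * max(len(a), len(b))                 # heuristic padding length
x_ext = odd_ext(x, n)                       # odd-extend both ends
y = lfilter(b, a, x_ext)[n:-n]              # filter, then trim the extension off

assert y.shape == x.shape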
3,632
31.4375
63
py
scipy
scipy-main/scipy/signal/tests/test_dltisys.py
# Author: Jeffrey Armstrong <jeff@approximatrix.com> # April 4, 2011 import numpy as np from numpy.testing import (assert_equal, assert_array_almost_equal, assert_array_equal, assert_allclose, assert_, assert_almost_equal, suppress_warnings) from pytest import raises as assert_raises from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti, StateSpace, TransferFunction, ZerosPolesGain, dfreqresp, dbode, BadCoefficients) class TestDLTI: def test_dlsim(self): a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) c = np.asarray([[0.1, 0.3]]) d = np.asarray([[0.0, -0.1, 0.0]]) dt = 0.5 # Create an input matrix with inputs down the columns (3 cols) and its # respective time input vector u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis], np.full((5, 1), 0.01), np.full((5, 1), -0.002))) t_in = np.linspace(0, 2.0, num=5) # Define the known result yout_truth = np.array([[-0.001, -0.00073, 0.039446, 0.0915387, 0.13195948]]).T xout_truth = np.asarray([[0, 0], [0.0012, 0.0005], [0.40233, 0.00071], [1.163368, -0.079327], [2.2402985, -0.3035679]]) tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in) assert_array_almost_equal(yout_truth, yout) assert_array_almost_equal(xout_truth, xout) assert_array_almost_equal(t_in, tout) # Make sure input with single-dimension doesn't raise error dlsim((1, 2, 3), 4) # Interpolated control - inputs should have different time steps # than the discrete model uses internally u_sparse = u[[0, 4], :] t_sparse = np.asarray([0.0, 2.0]) tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse) assert_array_almost_equal(yout_truth, yout) assert_array_almost_equal(xout_truth, xout) assert_equal(len(tout), yout.shape[0]) # Transfer functions (assume dt = 0.5) num = np.asarray([1.0, -0.1]) den = np.asarray([0.3, 1.0, 0.2]) yout_truth = np.array([[0.0, 0.0, 3.33333333333333, -4.77777777777778, 23.0370370370370]]).T # Assume use of the first column of the control input built earlier tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in) assert_array_almost_equal(yout, yout_truth) assert_array_almost_equal(t_in, tout) # Retest the same with a 1-D input vector uflat = np.asarray(u[:, 0]) uflat = uflat.reshape((5,)) tout, yout = dlsim((num, den, 0.5), uflat, t_in) assert_array_almost_equal(yout, yout_truth) assert_array_almost_equal(t_in, tout) # zeros-poles-gain representation zd = np.array([0.5, -0.5]) pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) k = 1.0 yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in) assert_array_almost_equal(yout, yout_truth) assert_array_almost_equal(t_in, tout) # Raise an error for continuous-time systems system = lti([1], [1, 1]) assert_raises(AttributeError, dlsim, system, u) def test_dstep(self): a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) c = np.asarray([[0.1, 0.3]]) d = np.asarray([[0.0, -0.1, 0.0]]) dt = 0.5 # Because b.shape[1] == 3, dstep should result in a tuple of three # result vectors yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956, -0.036324, -0.093318, -0.15782348, -0.226628324, -0.2969374948]), np.asarray([-0.1, -0.075, -0.058, -0.04815, -0.04453, -0.0461895, -0.0521812, -0.061588875, -0.073549579, -0.08727047595]), np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239, 0.009081, 0.0233295, 0.03945587, 0.056657081, 0.0742343737])) tout, yout = dstep((a, b, c, d, dt), n=10) assert_equal(len(yout), 3) for i in range(0, len(yout)): assert_equal(yout[i].shape[0], 10) 
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i]) # Check that the other two inputs (tf, zpk) will work as well tfin = ([1.0], [1.0, 1.0], 0.5) yout_tfstep = np.asarray([0.0, 1.0, 0.0]) tout, yout = dstep(tfin, n=3) assert_equal(len(yout), 1) assert_array_almost_equal(yout[0].flatten(), yout_tfstep) zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) tout, yout = dstep(zpkin, n=3) assert_equal(len(yout), 1) assert_array_almost_equal(yout[0].flatten(), yout_tfstep) # Raise an error for continuous-time systems system = lti([1], [1, 1]) assert_raises(AttributeError, dstep, system) def test_dimpulse(self): a = np.asarray([[0.9, 0.1], [-0.2, 0.9]]) b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]]) c = np.asarray([[0.1, 0.3]]) d = np.asarray([[0.0, -0.1, 0.0]]) dt = 0.5 # Because b.shape[1] == 3, dimpulse should result in a tuple of three # result vectors yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084, -0.045884, -0.056994, -0.06450548, -0.068804844, -0.0703091708]), np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362, -0.0016595, -0.0059917, -0.009407675, -0.011960704, -0.01372089695]), np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771, 0.011471, 0.0142485, 0.01612637, 0.017201211, 0.0175772927])) tout, yout = dimpulse((a, b, c, d, dt), n=10) assert_equal(len(yout), 3) for i in range(0, len(yout)): assert_equal(yout[i].shape[0], 10) assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i]) # Check that the other two inputs (tf, zpk) will work as well tfin = ([1.0], [1.0, 1.0], 0.5) yout_tfimpulse = np.asarray([0.0, 1.0, -1.0]) tout, yout = dimpulse(tfin, n=3) assert_equal(len(yout), 1) assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,) tout, yout = dimpulse(zpkin, n=3) assert_equal(len(yout), 1) assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse) # Raise an error for continuous-time systems system = lti([1], [1, 1]) assert_raises(AttributeError, dimpulse, system) def test_dlsim_trivial(self): a = np.array([[0.0]]) b = np.array([[0.0]]) c = np.array([[0.0]]) d = np.array([[0.0]]) n = 5 u = np.zeros(n).reshape(-1, 1) tout, yout, xout = dlsim((a, b, c, d, 1), u) assert_array_equal(tout, np.arange(float(n))) assert_array_equal(yout, np.zeros((n, 1))) assert_array_equal(xout, np.zeros((n, 1))) def test_dlsim_simple1d(self): a = np.array([[0.5]]) b = np.array([[0.0]]) c = np.array([[1.0]]) d = np.array([[0.0]]) n = 5 u = np.zeros(n).reshape(-1, 1) tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) assert_array_equal(tout, np.arange(float(n))) expected = (0.5 ** np.arange(float(n))).reshape(-1, 1) assert_array_equal(yout, expected) assert_array_equal(xout, expected) def test_dlsim_simple2d(self): lambda1 = 0.5 lambda2 = 0.25 a = np.array([[lambda1, 0.0], [0.0, lambda2]]) b = np.array([[0.0], [0.0]]) c = np.array([[1.0, 0.0], [0.0, 1.0]]) d = np.array([[0.0], [0.0]]) n = 5 u = np.zeros(n).reshape(-1, 1) tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1) assert_array_equal(tout, np.arange(float(n))) # The analytical solution: expected = (np.array([lambda1, lambda2]) ** np.arange(float(n)).reshape(-1, 1)) assert_array_equal(yout, expected) assert_array_equal(xout, expected) def test_more_step_and_impulse(self): lambda1 = 0.5 lambda2 = 0.75 a = np.array([[lambda1, 0.0], [0.0, lambda2]]) b = np.array([[1.0, 0.0], [0.0, 1.0]]) c = np.array([[1.0, 1.0]]) d = np.array([[0.0, 0.0]]) n = 10 # Check a step response. ts, ys = dstep((a, b, c, d, 1), n=n) # Create the exact step response. 
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n)) stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n)) assert_allclose(ys[0][:, 0], stp0) assert_allclose(ys[1][:, 0], stp1) # Check an impulse response with an initial condition. x0 = np.array([1.0, 1.0]) ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0) # Create the exact impulse response. imp = (np.array([lambda1, lambda2]) ** np.arange(-1, n + 1).reshape(-1, 1)) imp[0, :] = 0.0 # Analytical solution to impulse response y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0) y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0) assert_allclose(yi[0][:, 0], y0) assert_allclose(yi[1][:, 0], y1) # Check that dt=0.1, n=3 gives 3 time values. system = ([1.0], [1.0, -0.5], 0.1) t, (y,) = dstep(system, n=3) assert_allclose(t, [0, 0.1, 0.2]) assert_array_equal(y.T, [[0, 1.0, 1.5]]) t, (y,) = dimpulse(system, n=3) assert_allclose(t, [0, 0.1, 0.2]) assert_array_equal(y.T, [[0, 1, 0.5]]) class TestDlti: def test_dlti_instantiation(self): # Test that lti can be instantiated. dt = 0.05 # TransferFunction s = dlti([1], [-1], dt=dt) assert_(isinstance(s, TransferFunction)) assert_(isinstance(s, dlti)) assert_(not isinstance(s, lti)) assert_equal(s.dt, dt) # ZerosPolesGain s = dlti(np.array([]), np.array([-1]), 1, dt=dt) assert_(isinstance(s, ZerosPolesGain)) assert_(isinstance(s, dlti)) assert_(not isinstance(s, lti)) assert_equal(s.dt, dt) # StateSpace s = dlti([1], [-1], 1, 3, dt=dt) assert_(isinstance(s, StateSpace)) assert_(isinstance(s, dlti)) assert_(not isinstance(s, lti)) assert_equal(s.dt, dt) # Number of inputs assert_raises(ValueError, dlti, 1) assert_raises(ValueError, dlti, 1, 1, 1, 1, 1) class TestStateSpaceDisc: def test_initialization(self): # Check that all initializations work dt = 0.05 StateSpace(1, 1, 1, 1, dt=dt) StateSpace([1], [2], [3], [4], dt=dt) StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), np.array([[1, 0]]), np.array([[0]]), dt=dt) StateSpace(1, 1, 1, 1, dt=True) def test_conversion(self): # Check the conversion functions s = StateSpace(1, 2, 3, 4, dt=0.05) assert_(isinstance(s.to_ss(), StateSpace)) assert_(isinstance(s.to_tf(), TransferFunction)) assert_(isinstance(s.to_zpk(), ZerosPolesGain)) # Make sure copies work assert_(StateSpace(s) is not s) assert_(s.to_ss() is not s) def test_properties(self): # Test setters/getters for cross class properties. # This implicitly tests to_tf() and to_zpk() # Getters s = StateSpace(1, 1, 1, 1, dt=0.05) assert_equal(s.poles, [1]) assert_equal(s.zeros, [0]) class TestTransferFunction: def test_initialization(self): # Check that all initializations work dt = 0.05 TransferFunction(1, 1, dt=dt) TransferFunction([1], [2], dt=dt) TransferFunction(np.array([1]), np.array([2]), dt=dt) TransferFunction(1, 1, dt=True) def test_conversion(self): # Check the conversion functions s = TransferFunction([1, 0], [1, -1], dt=0.05) assert_(isinstance(s.to_ss(), StateSpace)) assert_(isinstance(s.to_tf(), TransferFunction)) assert_(isinstance(s.to_zpk(), ZerosPolesGain)) # Make sure copies work assert_(TransferFunction(s) is not s) assert_(s.to_tf() is not s) def test_properties(self): # Test setters/getters for cross class properties. 
# This implicitly tests to_ss() and to_zpk() # Getters s = TransferFunction([1, 0], [1, -1], dt=0.05) assert_equal(s.poles, [1]) assert_equal(s.zeros, [0]) class TestZerosPolesGain: def test_initialization(self): # Check that all initializations work dt = 0.05 ZerosPolesGain(1, 1, 1, dt=dt) ZerosPolesGain([1], [2], 1, dt=dt) ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt) ZerosPolesGain(1, 1, 1, dt=True) def test_conversion(self): # Check the conversion functions s = ZerosPolesGain(1, 2, 3, dt=0.05) assert_(isinstance(s.to_ss(), StateSpace)) assert_(isinstance(s.to_tf(), TransferFunction)) assert_(isinstance(s.to_zpk(), ZerosPolesGain)) # Make sure copies work assert_(ZerosPolesGain(s) is not s) assert_(s.to_zpk() is not s) class Test_dfreqresp: def test_manual(self): # Test dfreqresp() real part calculation (manual sanity check). # 1st order low-pass filter: H(z) = 1 / (z - 0.2), system = TransferFunction(1, [1, -0.2], dt=0.1) w = [0.1, 1, 10] w, H = dfreqresp(system, w=w) # test real expected_re = [1.2383, 0.4130, -0.7553] assert_almost_equal(H.real, expected_re, decimal=4) # test imag expected_im = [-0.1555, -1.0214, 0.3955] assert_almost_equal(H.imag, expected_im, decimal=4) def test_auto(self): # Test dfreqresp() real part calculation. # 1st order low-pass filter: H(z) = 1 / (z - 0.2), system = TransferFunction(1, [1, -0.2], dt=0.1) w = [0.1, 1, 10, 100] w, H = dfreqresp(system, w=w) jw = np.exp(w * 1j) y = np.polyval(system.num, jw) / np.polyval(system.den, jw) # test real expected_re = y.real assert_almost_equal(H.real, expected_re) # test imag expected_im = y.imag assert_almost_equal(H.imag, expected_im) def test_freq_range(self): # Test that freqresp() finds a reasonable frequency range. # 1st order low-pass filter: H(z) = 1 / (z - 0.2), # Expected range is from 0.01 to 10. system = TransferFunction(1, [1, -0.2], dt=0.1) n = 10 expected_w = np.linspace(0, np.pi, 10, endpoint=False) w, H = dfreqresp(system, n=n) assert_almost_equal(w, expected_w) def test_pole_one(self): # Test that freqresp() doesn't fail on a system with a pole at 0. # integrator, pole at zero: H(s) = 1 / s system = TransferFunction([1], [1, -1], dt=0.1) with suppress_warnings() as sup: sup.filter(RuntimeWarning, message="divide by zero") sup.filter(RuntimeWarning, message="invalid value encountered") w, H = dfreqresp(system, n=2) assert_equal(w[0], 0.) # a fail would give not-a-number def test_error(self): # Raise an error for continuous-time systems system = lti([1], [1, 1]) assert_raises(AttributeError, dfreqresp, system) def test_from_state_space(self): # H(z) = 2 / z^3 - 0.5 * z^2 system_TF = dlti([2], [1, -0.5, 0, 0]) A = np.array([[0.5, 0, 0], [1, 0, 0], [0, 1, 0]]) B = np.array([[1, 0, 0]]).T C = np.array([[0, 0, 2]]) D = 0 system_SS = dlti(A, B, C, D) w = 10.0**np.arange(-3,0,.5) with suppress_warnings() as sup: sup.filter(BadCoefficients) w1, H1 = dfreqresp(system_TF, w=w) w2, H2 = dfreqresp(system_SS, w=w) assert_almost_equal(H1, H2) def test_from_zpk(self): # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), system_ZPK = dlti([],[0.2],0.3) system_TF = dlti(0.3, [1, -0.2]) w = [0.1, 1, 10, 100] w1, H1 = dfreqresp(system_ZPK, w=w) w2, H2 = dfreqresp(system_TF, w=w) assert_almost_equal(H1, H2) class Test_bode: def test_manual(self): # Test bode() magnitude calculation (manual sanity check). 
# 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), dt = 0.1 system = TransferFunction(0.3, [1, -0.2], dt=dt) w = [0.1, 0.5, 1, np.pi] w2, mag, phase = dbode(system, w=w) # Test mag expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412] assert_almost_equal(mag, expected_mag, decimal=4) # Test phase expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000] assert_almost_equal(phase, expected_phase, decimal=4) # Test frequency assert_equal(np.array(w) / dt, w2) def test_auto(self): # Test bode() magnitude calculation. # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), system = TransferFunction(0.3, [1, -0.2], dt=0.1) w = np.array([0.1, 0.5, 1, np.pi]) w2, mag, phase = dbode(system, w=w) jw = np.exp(w * 1j) y = np.polyval(system.num, jw) / np.polyval(system.den, jw) # Test mag expected_mag = 20.0 * np.log10(abs(y)) assert_almost_equal(mag, expected_mag) # Test phase expected_phase = np.rad2deg(np.angle(y)) assert_almost_equal(phase, expected_phase) def test_range(self): # Test that bode() finds a reasonable frequency range. # 1st order low-pass filter: H(s) = 0.3 / (z - 0.2), dt = 0.1 system = TransferFunction(0.3, [1, -0.2], dt=0.1) n = 10 # Expected range is from 0.01 to 10. expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt w, mag, phase = dbode(system, n=n) assert_almost_equal(w, expected_w) def test_pole_one(self): # Test that freqresp() doesn't fail on a system with a pole at 0. # integrator, pole at zero: H(s) = 1 / s system = TransferFunction([1], [1, -1], dt=0.1) with suppress_warnings() as sup: sup.filter(RuntimeWarning, message="divide by zero") sup.filter(RuntimeWarning, message="invalid value encountered") w, mag, phase = dbode(system, n=2) assert_equal(w[0], 0.) # a fail would give not-a-number def test_imaginary(self): # bode() should not fail on a system with pure imaginary poles. # The test passes if bode doesn't raise an exception. system = TransferFunction([1], [1, 0, 100], dt=0.1) dbode(system, n=2) def test_error(self): # Raise an error for continuous-time systems system = lti([1], [1, 1]) assert_raises(AttributeError, dbode, system) class TestTransferFunctionZConversion: """Test private conversions between 'z' and 'z**-1' polynomials.""" def test_full(self): # Numerator and denominator same order num = [2, 3, 4] den = [5, 6, 7] num2, den2 = TransferFunction._z_to_zinv(num, den) assert_equal(num, num2) assert_equal(den, den2) num2, den2 = TransferFunction._zinv_to_z(num, den) assert_equal(num, num2) assert_equal(den, den2) def test_numerator(self): # Numerator lower order than denominator num = [2, 3] den = [5, 6, 7] num2, den2 = TransferFunction._z_to_zinv(num, den) assert_equal([0, 2, 3], num2) assert_equal(den, den2) num2, den2 = TransferFunction._zinv_to_z(num, den) assert_equal([2, 3, 0], num2) assert_equal(den, den2) def test_denominator(self): # Numerator higher order than denominator num = [2, 3, 4] den = [5, 6] num2, den2 = TransferFunction._z_to_zinv(num, den) assert_equal(num, num2) assert_equal([0, 5, 6], den2) num2, den2 = TransferFunction._zinv_to_z(num, den) assert_equal(num, num2) assert_equal([5, 6, 0], den2)
21558
34.991653
78
py
scipy
scipy-main/scipy/signal/tests/test_signaltools.py
import sys from concurrent.futures import ThreadPoolExecutor, as_completed from decimal import Decimal from itertools import product from math import gcd import pytest from pytest import raises as assert_raises from numpy.testing import ( assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_, assert_array_less, suppress_warnings) from numpy import array, arange import numpy as np from scipy.fft import fft from scipy.ndimage import correlate1d from scipy.optimize import fmin, linear_sum_assignment from scipy import signal from scipy.signal import ( correlate, correlate2d, correlation_lags, convolve, convolve2d, fftconvolve, oaconvolve, choose_conv_method, hilbert, hilbert2, lfilter, lfilter_zi, filtfilt, butter, zpk2tf, zpk2sos, invres, invresz, vectorstrength, lfiltic, tf2sos, sosfilt, sosfiltfilt, sosfilt_zi, tf2zpk, BadCoefficients, detrend, unique_roots, residue, residuez) from scipy.signal.windows import hann from scipy.signal._signaltools import (_filtfilt_gust, _compute_factors, _group_poles) from scipy.signal._upfirdn import _upfirdn_modes from scipy._lib import _testutils class _TestConvolve: def test_basic(self): a = [3, 4, 5, 6, 5, 4] b = [1, 2, 3] c = convolve(a, b) assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12])) def test_same(self): a = [3, 4, 5] b = [1, 2, 3, 4] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 34])) def test_same_eq(self): a = [3, 4, 5] b = [1, 2, 3] c = convolve(a, b, mode="same") assert_array_equal(c, array([10, 22, 22])) def test_complex(self): x = array([1 + 1j, 2 + 1j, 3 + 1j]) y = array([1 + 1j, 2 + 1j]) z = convolve(x, y) assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j])) def test_zero_rank(self): a = 1289 b = 4567 c = convolve(a, b) assert_equal(c, a * b) def test_broadcastable(self): a = np.arange(27).reshape(3, 3, 3) b = np.arange(3) for i in range(3): b_shape = [1]*3 b_shape[i] = 3 x = convolve(a, b.reshape(b_shape), method='direct') y = convolve(a, b.reshape(b_shape), method='fft') assert_allclose(x, y) def test_single_element(self): a = array([4967]) b = array([3920]) c = convolve(a, b) assert_equal(c, a * b) def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve(a, b) d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) assert_array_equal(c, d) def test_input_swapping(self): small = arange(8).reshape(2, 2, 2) big = 1j * arange(27).reshape(3, 3, 3) big += arange(27)[::-1].reshape(3, 3, 3) out_array = array( [[[0 + 0j, 26 + 0j, 25 + 1j, 24 + 2j], [52 + 0j, 151 + 5j, 145 + 11j, 93 + 11j], [46 + 6j, 133 + 23j, 127 + 29j, 81 + 23j], [40 + 12j, 98 + 32j, 93 + 37j, 54 + 24j]], [[104 + 0j, 247 + 13j, 237 + 23j, 135 + 21j], [282 + 30j, 632 + 96j, 604 + 124j, 330 + 86j], [246 + 66j, 548 + 180j, 520 + 208j, 282 + 134j], [142 + 66j, 307 + 161j, 289 + 179j, 153 + 107j]], [[68 + 36j, 157 + 103j, 147 + 113j, 81 + 75j], [174 + 138j, 380 + 348j, 352 + 376j, 186 + 230j], [138 + 174j, 296 + 432j, 268 + 460j, 138 + 278j], [70 + 138j, 145 + 323j, 127 + 341j, 63 + 197j]], [[32 + 72j, 68 + 166j, 59 + 175j, 30 + 100j], [68 + 192j, 139 + 433j, 117 + 455j, 57 + 255j], [38 + 222j, 73 + 499j, 51 + 521j, 21 + 291j], [12 + 144j, 20 + 318j, 7 + 331j, 0 + 182j]]]) assert_array_equal(convolve(small, big, 'full'), out_array) assert_array_equal(convolve(big, small, 'full'), out_array) assert_array_equal(convolve(small, big, 'same'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'same'), out_array[0:3, 0:3, 
0:3]) assert_array_equal(convolve(small, big, 'valid'), out_array[1:3, 1:3, 1:3]) assert_array_equal(convolve(big, small, 'valid'), out_array[1:3, 1:3, 1:3]) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, convolve, a, b, mode='spam') assert_raises(ValueError, convolve, a, b, mode='eggs', method='fft') assert_raises(ValueError, convolve, a, b, mode='ham', method='direct') assert_raises(ValueError, convolve, a, b, mode='full', method='bacon') assert_raises(ValueError, convolve, a, b, mode='same', method='bacon') class TestConvolve(_TestConvolve): def test_valid_mode2(self): # See gh-5897 a = [1, 2, 3, 6, 5, 3] b = [2, 3, 4, 5, 3, 4, 2, 2, 1] expected = [70, 78, 73, 65] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) a = [1 + 5j, 2 - 1j, 3 + 0j] b = [2 - 3j, 1 + 0j] expected = [2 - 3j, 8 - 10j] out = convolve(a, b, 'valid') assert_array_equal(out, expected) out = convolve(b, a, 'valid') assert_array_equal(out, expected) def test_same_mode(self): a = [1, 2, 3, 3, 1, 2] b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3] c = convolve(a, b, 'same') d = array([57, 61, 63, 57, 45, 36]) assert_array_equal(c, d) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve, *(b, a), **{'mode': 'valid'}) def test_convolve_method(self, n=100): # this types data structure was manually encoded instead of # using custom filters on the soon-to-be-removed np.sctypes types = {'uint16', 'uint64', 'int64', 'int32', 'complex128', 'float64', 'float16', 'complex64', 'float32', 'int16', 'uint8', 'uint32', 'int8', 'bool'} args = [(t1, t2, mode) for t1 in types for t2 in types for mode in ['valid', 'full', 'same']] # These are random arrays, which means test is much stronger than # convolving testing by convolving two np.ones arrays np.random.seed(42) array_types = {'i': np.random.choice([0, 1], size=n), 'f': np.random.randn(n)} array_types['b'] = array_types['u'] = array_types['i'] array_types['c'] = array_types['f'] + 0.5j*array_types['f'] for t1, t2, mode in args: x1 = array_types[np.dtype(t1).kind].astype(t1) x2 = array_types[np.dtype(t2).kind].astype(t2) results = {key: convolve(x1, x2, method=key, mode=mode) for key in ['fft', 'direct']} assert_equal(results['fft'].dtype, results['direct'].dtype) if 'bool' in t1 and 'bool' in t2: assert_equal(choose_conv_method(x1, x2), 'direct') continue # Found by experiment. Found approx smallest value for (rtol, atol) # threshold to have tests pass. if any([t in {'complex64', 'float32'} for t in [t1, t2]]): kwargs = {'rtol': 1.0e-4, 'atol': 1e-6} elif 'float16' in [t1, t2]: # atol is default for np.allclose kwargs = {'rtol': 1e-3, 'atol': 1e-3} else: # defaults for np.allclose (different from assert_allclose) kwargs = {'rtol': 1e-5, 'atol': 1e-8} assert_allclose(results['fft'], results['direct'], **kwargs) def test_convolve_method_large_input(self): # This is really a test that convolving two large integers goes to the # direct method even if they're in the fft method. 
for n in [10, 20, 50, 51, 52, 53, 54, 60, 62]: z = np.array([2**n], dtype=np.int64) fft = convolve(z, z, method='fft') direct = convolve(z, z, method='direct') # this is the case when integer precision gets to us # issue #6076 has more detail, hopefully more tests after resolved if n < 50: assert_equal(fft, direct) assert_equal(fft, 2**(2*n)) assert_equal(direct, 2**(2*n)) def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, convolve, [1], 2, method='direct') assert_raises(ValueError, convolve, 1, [2], method='direct') assert_raises(ValueError, convolve, [1], 2, method='fft') assert_raises(ValueError, convolve, 1, [2], method='fft') assert_raises(ValueError, convolve, [1], [[2]]) assert_raises(ValueError, convolve, [3], 2) class _TestConvolve2d: def test_2d_arrays(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] d = array([[2, 7, 16, 17, 12], [10, 30, 62, 58, 38], [12, 31, 58, 49, 30]]) e = convolve2d(a, b) assert_array_equal(e, d) def test_valid_mode(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = [[1, 2, 3], [3, 4, 5]] h = array([[62, 80, 98, 116, 134]]) g = convolve2d(e, f, 'valid') assert_array_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_valid_mode_complx(self): e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] f = np.array([[1, 2, 3], [3, 4, 5]], dtype=complex) + 1j h = array([[62.+24.j, 80.+30.j, 98.+36.j, 116.+42.j, 134.+48.j]]) g = convolve2d(e, f, 'valid') assert_array_almost_equal(g, h) # See gh-5897 g = convolve2d(f, e, 'valid') assert_array_equal(g, h) def test_fillvalue(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] fillval = 1 c = convolve2d(a, b, 'full', 'fill', fillval) d = array([[24, 26, 31, 34, 32], [28, 40, 62, 64, 52], [32, 46, 67, 62, 48]]) assert_array_equal(c, d) def test_fillvalue_errors(self): msg = "could not cast `fillvalue` directly to the output " with np.testing.suppress_warnings() as sup: sup.filter(np.ComplexWarning, "Casting complex values") with assert_raises(ValueError, match=msg): convolve2d([[1]], [[1, 2]], fillvalue=1j) msg = "`fillvalue` must be scalar or an array with " with assert_raises(ValueError, match=msg): convolve2d([[1]], [[1, 2]], fillvalue=[1, 2]) def test_fillvalue_empty(self): # Check that fillvalue being empty raises an error: assert_raises(ValueError, convolve2d, [[1]], [[1, 2]], fillvalue=[]) def test_wrap_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'wrap') d = array([[80, 80, 74, 80, 80], [68, 68, 62, 68, 68], [80, 80, 74, 80, 80]]) assert_array_equal(c, d) def test_sym_boundary(self): a = [[1, 2, 3], [3, 4, 5]] b = [[2, 3, 4], [4, 5, 6]] c = convolve2d(a, b, 'full', 'symm') d = array([[34, 30, 44, 62, 66], [52, 48, 62, 80, 84], [82, 78, 92, 110, 114]]) assert_array_equal(c, d) @pytest.mark.parametrize('func', [convolve2d, correlate2d]) @pytest.mark.parametrize('boundary, expected', [('symm', [[37.0, 42.0, 44.0, 45.0]]), ('wrap', [[43.0, 44.0, 42.0, 39.0]])]) def test_same_with_boundary(self, func, boundary, expected): # Test boundary='symm' and boundary='wrap' with a "long" kernel. # The size of the kernel requires that the values in the "image" # be extended more than once to handle the requested boundary method. # This is a regression test for gh-8684 and gh-8814. 
image = np.array([[2.0, -1.0, 3.0, 4.0]]) kernel = np.ones((1, 21)) result = func(image, kernel, mode='same', boundary=boundary) # The expected results were calculated "by hand". Because the # kernel is all ones, the same result is expected for convolve2d # and correlate2d. assert_array_equal(result, expected) def test_boundary_extension_same(self): # Regression test for gh-12686. # Use ndimage.convolve with appropriate arguments to create the # expected result. import scipy.ndimage as ndi a = np.arange(1, 10*3+1, dtype=float).reshape(10, 3) b = np.arange(1, 10*10+1, dtype=float).reshape(10, 10) c = convolve2d(a, b, mode='same', boundary='wrap') assert_array_equal(c, ndi.convolve(a, b, mode='wrap', origin=(-1, -1))) def test_boundary_extension_full(self): # Regression test for gh-12686. # Use ndimage.convolve with appropriate arguments to create the # expected result. import scipy.ndimage as ndi a = np.arange(1, 3*3+1, dtype=float).reshape(3, 3) b = np.arange(1, 6*6+1, dtype=float).reshape(6, 6) c = convolve2d(a, b, mode='full', boundary='wrap') apad = np.pad(a, ((3, 3), (3, 3)), 'wrap') assert_array_equal(c, ndi.convolve(apad, b, mode='wrap')[:-1, :-1]) def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, convolve2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, convolve2d, *(b, a), **{'mode': 'valid'}) class TestConvolve2d(_TestConvolve2d): def test_same_mode(self): e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] g = convolve2d(e, f, 'same') h = array([[22, 28, 34], [80, 98, 116]]) assert_array_equal(g, h) def test_valid_mode2(self): # See gh-5897 e = [[1, 2, 3], [3, 4, 5]] f = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]] expected = [[62, 80, 98, 116, 134]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) e = [[1 + 1j, 2 - 3j], [3 + 1j, 4 + 0j]] f = [[2 - 1j, 3 + 2j, 4 + 0j], [4 - 0j, 5 + 1j, 6 - 3j]] expected = [[27 - 1j, 46. 
+ 2j]] out = convolve2d(e, f, 'valid') assert_array_equal(out, expected) # See gh-5897 out = convolve2d(f, e, 'valid') assert_array_equal(out, expected) def test_consistency_convolve_funcs(self): # Compare np.convolve, signal.convolve, signal.convolve2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.convolve(a, b, mode=mode), signal.convolve(a, b, mode=mode)) assert_almost_equal(np.squeeze( signal.convolve2d([a], [b], mode=mode)), signal.convolve(a, b, mode=mode)) def test_invalid_dims(self): assert_raises(ValueError, convolve2d, 3, 4) assert_raises(ValueError, convolve2d, [3], [4]) assert_raises(ValueError, convolve2d, [[[3]]], [[[4]]]) @pytest.mark.slow @pytest.mark.xfail_on_32bit("Can't create large array for test") def test_large_array(self): # Test indexing doesn't overflow an int (gh-10761) n = 2**31 // (1000 * np.int64().itemsize) _testutils.check_free_memory(2 * n * 1001 * np.int64().itemsize / 1e6) # Create a chequered pattern of 1s and 0s a = np.zeros(1001 * n, dtype=np.int64) a[::2] = 1 a = np.lib.stride_tricks.as_strided(a, shape=(n, 1000), strides=(8008, 8)) count = signal.convolve2d(a, [[1, 1]]) fails = np.where(count > 1) assert fails[0].size == 0 class TestFFTConvolve: @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_real_axes(self, axes): a = array([1, 2, 3]) expected = array([1, 4, 10, 12, 9.]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_complex(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_complex_axes(self, axes): a = array([1 + 1j, 2 + 2j, 3 + 3j]) expected = array([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j]) a = np.tile(a, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_real_same(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_real_same_axes(self, axes): a = array([[1, 2, 3], [4, 5, 6]]) expected = array([[1, 4, 10, 12, 9], [8, 26, 56, 54, 36], [16, 40, 73, 60, 36]]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, [0, 1], [1, 0], [0, -1], [-1, 0], [-2, 1], [1, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 
122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) if axes == '': out = fftconvolve(a, a) else: out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[1, 2], [2, 1], [1, -1], [-1, 1], [-2, 2], [2, -2], [-2, -1], [-1, -2]]) def test_2d_complex_same_axes(self, axes): a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]]) expected = array([ [-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j], [10j, 44j, 118j, 156j, 122j], [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j] ]) a = np.tile(a, [2, 1, 1]) expected = np.tile(expected, [2, 1, 1]) out = fftconvolve(a, a, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_real_same_mode(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) if axes == '': out = fftconvolve(a, b, 'same') else: out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) if axes == '': out = fftconvolve(b, a, 'same') else: out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', [1, -1, [1], [-1]]) def test_real_same_mode_axes(self, axes): a = array([1, 2, 3]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected_1 = array([35., 41., 47.]) expected_2 = array([9., 20., 25., 35., 41., 47., 39., 28., 2.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected_1 = np.tile(expected_1, [2, 1]) expected_2 = np.tile(expected_2, [2, 1]) out = fftconvolve(a, b, 'same', axes=axes) assert_array_almost_equal(out, expected_1) out = fftconvolve(b, a, 'same', axes=axes) assert_array_almost_equal(out, expected_2) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_real(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1]]) def test_valid_mode_real_axes(self, axes): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_valid_mode_complex(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j]) if axes == '': out = fftconvolve(a, b, 'valid') else: out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) if axes == '': out = fftconvolve(b, a, 'valid') else: out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_valid_mode_complex_axes(self, axes): a = array([3 - 1j, 2 + 7j, 1 + 0j]) b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j]) expected = array([45. + 12.j, 30. 
+ 23.j, 48 + 32.j]) a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=axes) assert_array_almost_equal(out, expected) out = fftconvolve(b, a, 'valid', axes=axes) assert_array_almost_equal(out, expected) def test_valid_mode_ignore_nonaxes(self): # See gh-5897 a = array([3, 2, 1]) b = array([3, 3, 5, 6, 8, 7, 9, 0, 1]) expected = array([24., 31., 41., 43., 49., 25., 12.]) a = np.tile(a, [2, 1]) b = np.tile(b, [1, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'valid', axes=1) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(fftconvolve([], []).size == 0) assert_(fftconvolve([5, 6], []).size == 0) assert_(fftconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = fftconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = fftconvolve(a, b) assert_equal(out, a * b) @pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]]) def test_random_data(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') if axes == '': out = fftconvolve(a, b, 'full') else: out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [1, [1], -1, [-1]]) def test_random_data_axes(self, axes): np.random.seed(1234) a = np.random.rand(1233) + 1j * np.random.rand(1233) b = np.random.rand(1321) + 1j * np.random.rand(1321) expected = np.convolve(a, b, 'full') a = np.tile(a, [2, 1]) b = np.tile(b, [2, 1]) expected = np.tile(expected, [2, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_(np.allclose(out, expected, rtol=1e-10)) @pytest.mark.parametrize('axes', [[1, 4], [4, 1], [1, -1], [-1, 1], [-4, 4], [4, -4], [-4, -1], [-1, -4]]) def test_random_data_multidim_axes(self, axes): a_shape, b_shape = (123, 22), (132, 11) np.random.seed(1234) a = np.random.rand(*a_shape) + 1j * np.random.rand(*a_shape) b = np.random.rand(*b_shape) + 1j * np.random.rand(*b_shape) expected = convolve2d(a, b, 'full') a = a[:, :, None, None, None] b = b[:, :, None, None, None] expected = expected[:, :, None, None, None] a = np.moveaxis(a.swapaxes(0, 2), 1, 4) b = np.moveaxis(b.swapaxes(0, 2), 1, 4) expected = np.moveaxis(expected.swapaxes(0, 2), 1, 4) # use 1 for dimension 2 in a and 3 in b to test broadcasting a = np.tile(a, [2, 1, 3, 1, 1]) b = np.tile(b, [2, 1, 1, 4, 1]) expected = np.tile(expected, [2, 1, 3, 4, 1]) out = fftconvolve(a, b, 'full', axes=axes) assert_allclose(out, expected, rtol=1e-10, atol=1e-10) @pytest.mark.slow @pytest.mark.parametrize( 'n', list(range(1, 100)) + list(range(1000, 1500)) + np.random.RandomState(1234).randint(1001, 10000, 5).tolist()) def test_many_sizes(self, n): a = np.random.rand(n) + 1j * np.random.rand(n) b = np.random.rand(n) + 1j * np.random.rand(n) expected = np.convolve(a, b, 'full') out = fftconvolve(a, b, 'full') assert_allclose(out, expected, atol=1e-10) out = fftconvolve(a, b, 'full', axes=[0]) assert_allclose(out, expected, atol=1e-10) def test_fft_nan(self): n = 1000 rng = np.random.default_rng(43876432987) sig_nan = rng.standard_normal(n) for val in [np.nan, np.inf]: sig_nan[100] = val coeffs = signal.firwin(200, 0.2) with pytest.warns(RuntimeWarning, match="Use of fft convolution"): signal.convolve(sig_nan, coeffs, mode='same', method='fft') def 
fftconvolve_err(*args, **kwargs): raise RuntimeError('Fell back to fftconvolve') def gen_oa_shapes(sizes): return [(a, b) for a, b in product(sizes, repeat=2) if abs(a - b) > 3] def gen_oa_shapes_2d(sizes): shapes0 = gen_oa_shapes(sizes) shapes1 = gen_oa_shapes(sizes) shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in zip(shapes0, shapes1)] modes = ['full', 'valid', 'same'] return [ishapes+(imode,) for ishapes, imode in product(shapes, modes) if imode != 'valid' or (ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or (ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])] def gen_oa_shapes_eq(sizes): return [(a, b) for a, b in product(sizes, repeat=2) if a >= b] class TestOAConvolve: @pytest.mark.slow() @pytest.mark.parametrize('shape_a_0, shape_b_0', gen_oa_shapes_eq(list(range(100)) + list(range(100, 1000, 23))) ) def test_real_manylens(self, shape_a_0, shape_b_0): a = np.random.rand(shape_a_0) b = np.random.rand(shape_b_0) expected = fftconvolve(a, b) out = oaconvolve(a, b) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('shape_a_0, shape_b_0', gen_oa_shapes([50, 47, 6, 4, 1])) @pytest.mark.parametrize('is_complex', [True, False]) @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) def test_1d_noaxes(self, shape_a_0, shape_b_0, is_complex, mode, monkeypatch): a = np.random.rand(shape_a_0) b = np.random.rand(shape_b_0) if is_complex: a = a + 1j*np.random.rand(shape_a_0) b = b + 1j*np.random.rand(shape_b_0) expected = fftconvolve(a, b, mode=mode) monkeypatch.setattr(signal._signaltools, 'fftconvolve', fftconvolve_err) out = oaconvolve(a, b, mode=mode) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [0, 1]) @pytest.mark.parametrize('shape_a_0, shape_b_0', gen_oa_shapes([50, 47, 6, 4])) @pytest.mark.parametrize('shape_a_extra', [1, 3]) @pytest.mark.parametrize('shape_b_extra', [1, 3]) @pytest.mark.parametrize('is_complex', [True, False]) @pytest.mark.parametrize('mode', ['full', 'valid', 'same']) def test_1d_axes(self, axes, shape_a_0, shape_b_0, shape_a_extra, shape_b_extra, is_complex, mode, monkeypatch): ax_a = [shape_a_extra]*2 ax_b = [shape_b_extra]*2 ax_a[axes] = shape_a_0 ax_b[axes] = shape_b_0 a = np.random.rand(*ax_a) b = np.random.rand(*ax_b) if is_complex: a = a + 1j*np.random.rand(*ax_a) b = b + 1j*np.random.rand(*ax_b) expected = fftconvolve(a, b, mode=mode, axes=axes) monkeypatch.setattr(signal._signaltools, 'fftconvolve', fftconvolve_err) out = oaconvolve(a, b, mode=mode, axes=axes) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('shape_a_0, shape_b_0, ' 'shape_a_1, shape_b_1, mode', gen_oa_shapes_2d([50, 47, 6, 4])) @pytest.mark.parametrize('is_complex', [True, False]) def test_2d_noaxes(self, shape_a_0, shape_b_0, shape_a_1, shape_b_1, mode, is_complex, monkeypatch): a = np.random.rand(shape_a_0, shape_a_1) b = np.random.rand(shape_b_0, shape_b_1) if is_complex: a = a + 1j*np.random.rand(shape_a_0, shape_a_1) b = b + 1j*np.random.rand(shape_b_0, shape_b_1) expected = fftconvolve(a, b, mode=mode) monkeypatch.setattr(signal._signaltools, 'fftconvolve', fftconvolve_err) out = oaconvolve(a, b, mode=mode) assert_array_almost_equal(out, expected) @pytest.mark.parametrize('axes', [[0, 1], [0, 2], [1, 2]]) @pytest.mark.parametrize('shape_a_0, shape_b_0, ' 'shape_a_1, shape_b_1, mode', gen_oa_shapes_2d([50, 47, 6, 4])) @pytest.mark.parametrize('shape_a_extra', [1, 3]) @pytest.mark.parametrize('shape_b_extra', [1, 3]) @pytest.mark.parametrize('is_complex', [True, False]) def test_2d_axes(self, axes, 
shape_a_0, shape_b_0, shape_a_1, shape_b_1, mode, shape_a_extra, shape_b_extra, is_complex, monkeypatch): ax_a = [shape_a_extra]*3 ax_b = [shape_b_extra]*3 ax_a[axes[0]] = shape_a_0 ax_b[axes[0]] = shape_b_0 ax_a[axes[1]] = shape_a_1 ax_b[axes[1]] = shape_b_1 a = np.random.rand(*ax_a) b = np.random.rand(*ax_b) if is_complex: a = a + 1j*np.random.rand(*ax_a) b = b + 1j*np.random.rand(*ax_b) expected = fftconvolve(a, b, mode=mode, axes=axes) monkeypatch.setattr(signal._signaltools, 'fftconvolve', fftconvolve_err) out = oaconvolve(a, b, mode=mode, axes=axes) assert_array_almost_equal(out, expected) def test_empty(self): # Regression test for #1745: crashes with 0-length input. assert_(oaconvolve([], []).size == 0) assert_(oaconvolve([5, 6], []).size == 0) assert_(oaconvolve([], [7]).size == 0) def test_zero_rank(self): a = array(4967) b = array(3920) out = oaconvolve(a, b) assert_equal(out, a * b) def test_single_element(self): a = array([4967]) b = array([3920]) out = oaconvolve(a, b) assert_equal(out, a * b) class TestAllFreqConvolves: @pytest.mark.parametrize('convapproach', [fftconvolve, oaconvolve]) def test_invalid_shapes(self, convapproach): a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) with assert_raises(ValueError, match="For 'valid' mode, one must be at least " "as large as the other in every dimension"): convapproach(a, b, mode='valid') @pytest.mark.parametrize('convapproach', [fftconvolve, oaconvolve]) def test_invalid_shapes_axes(self, convapproach): a = np.zeros([5, 6, 2, 1]) b = np.zeros([5, 6, 3, 1]) with assert_raises(ValueError, match=r"incompatible shapes for in1 and in2:" r" \(5L?, 6L?, 2L?, 1L?\) and" r" \(5L?, 6L?, 3L?, 1L?\)"): convapproach(a, b, axes=[0, 1]) @pytest.mark.parametrize('a,b', [([1], 2), (1, [2]), ([3], [[2]])]) @pytest.mark.parametrize('convapproach', [fftconvolve, oaconvolve]) def test_mismatched_dims(self, a, b, convapproach): with assert_raises(ValueError, match="in1 and in2 should have the same" " dimensionality"): convapproach(a, b) @pytest.mark.parametrize('convapproach', [fftconvolve, oaconvolve]) def test_invalid_flags(self, convapproach): with assert_raises(ValueError, match="acceptable mode flags are 'valid'," " 'same', or 'full'"): convapproach([1], [2], mode='chips') with assert_raises(ValueError, match="when provided, axes cannot be empty"): convapproach([1], [2], axes=[]) with assert_raises(ValueError, match="axes must be a scalar or " "iterable of integers"): convapproach([1], [2], axes=[[1, 2], [3, 4]]) with assert_raises(ValueError, match="axes must be a scalar or " "iterable of integers"): convapproach([1], [2], axes=[1., 2., 3., 4.]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): convapproach([1], [2], axes=[1]) with assert_raises(ValueError, match="axes exceeds dimensionality of input"): convapproach([1], [2], axes=[-2]) with assert_raises(ValueError, match="all axes must be unique"): convapproach([1], [2], axes=[0, 0]) @pytest.mark.parametrize('dtype', [np.longfloat, np.longcomplex]) def test_longdtype_input(self, dtype): x = np.random.random((27, 27)).astype(dtype) y = np.random.random((4, 4)).astype(dtype) if np.iscomplexobj(dtype()): x += .1j y -= .1j res = fftconvolve(x, y) assert_allclose(res, convolve(x, y, method='direct')) assert res.dtype == dtype class TestMedFilt: IN = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46], [50, 50, 50, 50, 50, 0, 72, 77, 68, 66], [50, 50, 50, 50, 50, 46, 47, 19, 64, 77], [50, 50, 50, 50, 50, 42, 15, 29, 95, 35], [50, 50, 50, 50, 50, 46, 34, 9, 
21, 66], [70, 97, 28, 68, 78, 77, 61, 58, 71, 42], [64, 53, 44, 29, 68, 32, 19, 68, 24, 84], [3, 33, 53, 67, 1, 78, 74, 55, 12, 83], [7, 11, 46, 70, 60, 47, 24, 43, 61, 26], [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]] OUT = [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0], [0, 50, 50, 50, 50, 42, 19, 21, 29, 0], [50, 50, 50, 50, 50, 47, 34, 34, 46, 35], [50, 50, 50, 50, 50, 50, 42, 47, 64, 42], [50, 50, 50, 50, 50, 50, 46, 55, 64, 35], [33, 50, 50, 50, 50, 47, 46, 43, 55, 26], [32, 50, 50, 50, 50, 47, 46, 45, 55, 26], [7, 46, 50, 50, 47, 46, 46, 43, 45, 21], [0, 32, 33, 39, 32, 32, 43, 43, 43, 0], [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]] KERNEL_SIZE = [7,3] def test_basic(self): d = signal.medfilt(self.IN, self.KERNEL_SIZE) e = signal.medfilt2d(np.array(self.IN, float), self.KERNEL_SIZE) assert_array_equal(d, self.OUT) assert_array_equal(d, e) @pytest.mark.parametrize('dtype', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64]) def test_types(self, dtype): # volume input and output types match in_typed = np.array(self.IN, dtype=dtype) assert_equal(signal.medfilt(in_typed).dtype, dtype) assert_equal(signal.medfilt2d(in_typed).dtype, dtype) def test_types_deprecated(self): dtype = np.longdouble in_typed = np.array(self.IN, dtype=dtype) msg = "Using medfilt with arrays of dtype" with pytest.deprecated_call(match=msg): assert_equal(signal.medfilt(in_typed).dtype, dtype) with pytest.deprecated_call(match=msg): assert_equal(signal.medfilt2d(in_typed).dtype, dtype) @pytest.mark.parametrize('dtype', [np.bool_, np.cfloat, np.cdouble, np.clongdouble, np.float16,]) def test_invalid_dtypes(self, dtype): in_typed = np.array(self.IN, dtype=dtype) with pytest.raises(ValueError, match="not supported"): signal.medfilt(in_typed) with pytest.raises(ValueError, match="not supported"): signal.medfilt2d(in_typed) def test_none(self): # gh-1651, trac #1124. Ensure this does not segfault. with pytest.warns(UserWarning): assert_raises(TypeError, signal.medfilt, None) # Expand on this test to avoid a regression with possible contiguous # numpy arrays that have odd strides. The stride value below gets # us into wrong memory if used (but it does not need to be used) dummy = np.arange(10, dtype=np.float64) a = dummy[5:6] a.strides = 16 assert_(signal.medfilt(a, 1) == 5.) def test_refcounting(self): # Check a refcounting-related crash a = Decimal(123) x = np.array([a, a], dtype=object) if hasattr(sys, 'getrefcount'): n = 2 * sys.getrefcount(a) else: n = 10 # Shouldn't segfault: with pytest.warns(UserWarning): for j in range(n): signal.medfilt(x) if hasattr(sys, 'getrefcount'): assert_(sys.getrefcount(a) < n) assert_equal(x, [a, a]) def test_object(self,): msg = "Using medfilt with arrays of dtype" with pytest.deprecated_call(match=msg): in_object = np.array(self.IN, dtype=object) out_object = np.array(self.OUT, dtype=object) assert_array_equal(signal.medfilt(in_object, self.KERNEL_SIZE), out_object) @pytest.mark.parametrize("dtype", [np.ubyte, np.float32, np.float64]) def test_medfilt2d_parallel(self, dtype): in_typed = np.array(self.IN, dtype=dtype) expected = np.array(self.OUT, dtype=dtype) # This is used to simplify the indexing calculations. assert in_typed.shape == expected.shape # We'll do the calculation in four chunks. M1 and N1 are the dimensions # of the first output chunk. We have to extend the input by half the # kernel size to be able to calculate the full output chunk. 
M1 = expected.shape[0] // 2 N1 = expected.shape[1] // 2 offM = self.KERNEL_SIZE[0] // 2 + 1 offN = self.KERNEL_SIZE[1] // 2 + 1 def apply(chunk): # in = slice of in_typed to use. # sel = slice of output to crop it to the correct region. # out = slice of output array to store in. M, N = chunk if M == 0: Min = slice(0, M1 + offM) Msel = slice(0, -offM) Mout = slice(0, M1) else: Min = slice(M1 - offM, None) Msel = slice(offM, None) Mout = slice(M1, None) if N == 0: Nin = slice(0, N1 + offN) Nsel = slice(0, -offN) Nout = slice(0, N1) else: Nin = slice(N1 - offN, None) Nsel = slice(offN, None) Nout = slice(N1, None) # Do the calculation, but do not write to the output in the threads. chunk_data = in_typed[Min, Nin] med = signal.medfilt2d(chunk_data, self.KERNEL_SIZE) return med[Msel, Nsel], Mout, Nout # Give each chunk to a different thread. output = np.zeros_like(expected) with ThreadPoolExecutor(max_workers=4) as pool: chunks = {(0, 0), (0, 1), (1, 0), (1, 1)} futures = {pool.submit(apply, chunk) for chunk in chunks} # Store each result in the output as it arrives. for future in as_completed(futures): data, Mslice, Nslice = future.result() output[Mslice, Nslice] = data assert_array_equal(output, expected) class TestWiener: def test_basic(self): g = array([[5, 6, 4, 3], [3, 5, 6, 2], [2, 3, 5, 6], [1, 6, 9, 7]], 'd') h = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667], [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888], [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837], [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) assert_array_almost_equal(signal.wiener(g), h, decimal=6) assert_array_almost_equal(signal.wiener(g, mysize=3), h, decimal=6) padtype_options = ["mean", "median", "minimum", "maximum", "line"] padtype_options += _upfirdn_modes class TestResample: def test_basic(self): # Some basic tests # Regression test for issue #3603. # window.shape must equal to sig.shape[0] sig = np.arange(128) num = 256 win = signal.get_window(('kaiser', 8.0), 160) assert_raises(ValueError, signal.resample, sig, num, window=win) # Other degenerate conditions assert_raises(ValueError, signal.resample_poly, sig, 'yo', 1) assert_raises(ValueError, signal.resample_poly, sig, 1, 0) assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='') assert_raises(ValueError, signal.resample_poly, sig, 2, 1, padtype='mean', cval=10) # test for issue #6505 - should not modify window.shape when axis ≠ 0 sig2 = np.tile(np.arange(160), (2, 1)) signal.resample(sig2, num, axis=-1, window=win) assert_(win.shape == (160,)) @pytest.mark.parametrize('window', (None, 'hamming')) @pytest.mark.parametrize('N', (20, 19)) @pytest.mark.parametrize('num', (100, 101, 10, 11)) def test_rfft(self, N, num, window): # Make sure the speed up using rfft gives the same result as the normal # way using fft x = np.linspace(0, 10, N, endpoint=False) y = np.cos(-x**2/6.0) assert_allclose(signal.resample(y, num, window=window), signal.resample(y + 0j, num, window=window).real) y = np.array([np.cos(-x**2/6.0), np.sin(-x**2/6.0)]) y_complex = y + 0j assert_allclose( signal.resample(y, num, axis=1, window=window), signal.resample(y_complex, num, axis=1, window=window).real, atol=1e-9) def test_input_domain(self): # Test if both input domain modes produce the same results. 
tsig = np.arange(256) + 0j fsig = fft(tsig) num = 256 assert_allclose( signal.resample(fsig, num, domain='freq'), signal.resample(tsig, num, domain='time'), atol=1e-9) @pytest.mark.parametrize('nx', (1, 2, 3, 5, 8)) @pytest.mark.parametrize('ny', (1, 2, 3, 5, 8)) @pytest.mark.parametrize('dtype', ('float', 'complex')) def test_dc(self, nx, ny, dtype): x = np.array([1] * nx, dtype) y = signal.resample(x, ny) assert_allclose(y, [1] * ny) @pytest.mark.parametrize('padtype', padtype_options) def test_mutable_window(self, padtype): # Test that a mutable window is not modified impulse = np.zeros(3) window = np.random.RandomState(0).randn(2) window_orig = window.copy() signal.resample_poly(impulse, 5, 1, window=window, padtype=padtype) assert_array_equal(window, window_orig) @pytest.mark.parametrize('padtype', padtype_options) def test_output_float32(self, padtype): # Test that float32 inputs yield a float32 output x = np.arange(10, dtype=np.float32) h = np.array([1, 1, 1], dtype=np.float32) y = signal.resample_poly(x, 1, 2, window=h, padtype=padtype) assert y.dtype == np.float32 @pytest.mark.parametrize('padtype', padtype_options) @pytest.mark.parametrize('dtype', [np.float32, np.float64]) def test_output_match_dtype(self, padtype, dtype): # Test that the dtype of x is preserved per issue #14733 x = np.arange(10, dtype=dtype) y = signal.resample_poly(x, 1, 2, padtype=padtype) assert y.dtype == x.dtype @pytest.mark.parametrize( "method, ext, padtype", [("fft", False, None)] + list( product( ["polyphase"], [False, True], padtype_options, ) ), ) def test_resample_methods(self, method, ext, padtype): # Test resampling of sinusoids and random noise (1-sec) rate = 100 rates_to = [49, 50, 51, 99, 100, 101, 199, 200, 201] # Sinusoids, windowed to avoid edge artifacts t = np.arange(rate) / float(rate) freqs = np.array((1., 10., 40.))[:, np.newaxis] x = np.sin(2 * np.pi * freqs * t) * hann(rate) for rate_to in rates_to: t_to = np.arange(rate_to) / float(rate_to) y_tos = np.sin(2 * np.pi * freqs * t_to) * hann(rate_to) if method == 'fft': y_resamps = signal.resample(x, rate_to, axis=-1) else: if ext and rate_to != rate: # Match default window design g = gcd(rate_to, rate) up = rate_to // g down = rate // g max_rate = max(up, down) f_c = 1. / max_rate half_len = 10 * max_rate window = signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) polyargs = {'window': window, 'padtype': padtype} else: polyargs = {'padtype': padtype} y_resamps = signal.resample_poly(x, rate_to, rate, axis=-1, **polyargs) for y_to, y_resamp, freq in zip(y_tos, y_resamps, freqs): if freq >= 0.5 * rate_to: y_to.fill(0.) 
# mostly low-passed away if padtype in ['minimum', 'maximum']: assert_allclose(y_resamp, y_to, atol=3e-1) else: assert_allclose(y_resamp, y_to, atol=1e-3) else: assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=(corr, rate, rate_to)) # Random data rng = np.random.RandomState(0) x = hann(rate) * np.cumsum(rng.randn(rate)) # low-pass, wind for rate_to in rates_to: # random data t_to = np.arange(rate_to) / float(rate_to) y_to = np.interp(t_to, t, x) if method == 'fft': y_resamp = signal.resample(x, rate_to) else: y_resamp = signal.resample_poly(x, rate_to, rate, padtype=padtype) assert_array_equal(y_to.shape, y_resamp.shape) corr = np.corrcoef(y_to, y_resamp)[0, 1] assert_(corr > 0.99, msg=corr) # More tests of fft method (Master 0.18.1 fails these) if method == 'fft': x1 = np.array([1.+0.j, 0.+0.j]) y1_test = signal.resample(x1, 4) # upsampling a complex array y1_true = np.array([1.+0.j, 0.5+0.j, 0.+0.j, 0.5+0.j]) assert_allclose(y1_test, y1_true, atol=1e-12) x2 = np.array([1., 0.5, 0., 0.5]) y2_test = signal.resample(x2, 2) # downsampling a real array y2_true = np.array([1., 0.]) assert_allclose(y2_test, y2_true, atol=1e-12) def test_poly_vs_filtfilt(self): # Check that up=1.0 gives same answer as filtfilt + slicing random_state = np.random.RandomState(17) try_types = (int, np.float32, np.complex64, float, complex) size = 10000 down_factors = [2, 11, 79] for dtype in try_types: x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) # resample_poly assumes zeros outside of signl, whereas filtfilt # can only constant-pad. Make them equivalent: x[0] = 0 x[-1] = 0 for down in down_factors: h = signal.firwin(31, 1. / down, window='hamming') yf = filtfilt(h, 1.0, x, padtype='constant')[::down] # Need to pass convolved version of filter to resample_poly, # since filtfilt does forward and backward, but resample_poly # only goes forward hc = convolve(h, h[::-1]) y = signal.resample_poly(x, 1, down, window=hc) assert_allclose(yf, y, atol=1e-7, rtol=1e-7) def test_correlate1d(self): for down in [2, 4]: for nx in range(1, 40, down): for nweights in (32, 33): x = np.random.random((nx,)) weights = np.random.random((nweights,)) y_g = correlate1d(x, weights[::-1], mode='constant') y_s = signal.resample_poly( x, up=1, down=down, window=weights) assert_allclose(y_g[::down], y_s) class TestCSpline1DEval: def test_basic(self): y = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0]) x = arange(len(y)) dx = x[1] - x[0] cj = signal.cspline1d(y) x2 = arange(len(y) * 10.0) / 10.0 y2 = signal.cspline1d_eval(cj, x2, dx=dx, x0=x[0]) # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) def test_complex(self): # create some smoothly varying complex signal to interpolate x = np.arange(2) y = np.zeros(x.shape, dtype=np.complex64) T = 10.0 f = 1.0 / T y = np.exp(2.0J * np.pi * f * x) # get the cspline transform cy = signal.cspline1d(y) # determine new test x value and interpolate xnew = np.array([0.5]) ynew = signal.cspline1d_eval(cy, xnew) assert_equal(ynew.dtype, y.dtype) class TestOrderFilt: def test_basic(self): assert_array_equal(signal.order_filter([1, 2, 3], [1, 0, 1], 1), [2, 3, 2]) class _TestLinearFilter: def generate(self, shape): x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) return self.convert_dtype(x) def convert_dtype(self, arr): if self.dtype == np.dtype('O'): arr = np.asarray(arr) out = np.empty(arr.shape, self.dtype) iter 
= np.nditer([arr, out], ['refs_ok','zerosize_ok'], [['readonly'],['writeonly']]) for x, y in iter: y[...] = self.type(x[()]) return out else: return np.array(arr, self.dtype, copy=False) def test_rank_1_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, -0.5]) y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.]) assert_array_almost_equal(lfilter(b, a, x), y_r) def test_rank_1_IIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([0.5, -0.5]) zi = self.convert_dtype([1, 2]) y_r = self.convert_dtype([1, 5, 9, 13, 17, 21]) zf_r = self.convert_dtype([13, -10]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_1_FIR_init_cond(self): x = self.generate((6,)) b = self.convert_dtype([1, 1, 1]) a = self.convert_dtype([1]) zi = self.convert_dtype([1, 1]) y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.]) zf_r = self.convert_dtype([9, 5]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_0(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]]) y = lfilter(b, a, x, axis=0) assert_array_almost_equal(y_r2_a0, y) def test_rank_2_IIR_axis_1(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]]) y = lfilter(b, a, x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank_2_IIR_axis_0_init_cond(self): x = self.generate((4, 3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((4,1))) y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13], [19, -17, 19]]) zf_r = self.convert_dtype([-5, -17, -29, -41])[:, np.newaxis] y, zf = lfilter(b, a, x, axis=1, zi=zi) assert_array_almost_equal(y_r2_a0_1, y) assert_array_almost_equal(zf, zf_r) def test_rank_2_IIR_axis_1_init_cond(self): x = self.generate((4,3)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) zi = self.convert_dtype(np.ones((1,3))) y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]]) zf_r = self.convert_dtype([[-23, -23, -23]]) y, zf = lfilter(b, a, x, axis=0, zi=zi) assert_array_almost_equal(y_r2_a0_0, y) assert_array_almost_equal(zf, zf_r) def test_rank_3_IIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_IIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, -1]) a = self.convert_dtype([0.5, 0.5]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 1 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1]) y, zf = lfilter(b, a, x, axis, zi) def lf0(w): return lfilter(b, a, w, zi=zi1)[0] def lf1(w): return lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def 
test_rank_3_FIR(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): y = lfilter(b, a, x, axis) y_r = np.apply_along_axis(lambda w: lfilter(b, a, w), axis, x) assert_array_almost_equal(y, y_r) def test_rank_3_FIR_init_cond(self): x = self.generate((4, 3, 2)) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) for axis in range(x.ndim): zi_shape = list(x.shape) zi_shape[axis] = 2 zi = self.convert_dtype(np.ones(zi_shape)) zi1 = self.convert_dtype([1, 1]) y, zf = lfilter(b, a, x, axis, zi) def lf0(w): return lfilter(b, a, w, zi=zi1)[0] def lf1(w): return lfilter(b, a, w, zi=zi1)[1] y_r = np.apply_along_axis(lf0, axis, x) zf_r = np.apply_along_axis(lf1, axis, x) assert_array_almost_equal(y, y_r) assert_array_almost_equal(zf, zf_r) def test_zi_pseudobroadcast(self): x = self.generate((4, 5, 20)) b,a = signal.butter(8, 0.2, output='ba') b = self.convert_dtype(b) a = self.convert_dtype(a) zi_size = b.shape[0] - 1 # lfilter requires x.ndim == zi.ndim exactly. However, zi can have # length 1 dimensions. zi_full = self.convert_dtype(np.ones((4, 5, zi_size))) zi_sing = self.convert_dtype(np.ones((1, 1, zi_size))) y_full, zf_full = lfilter(b, a, x, zi=zi_full) y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing) assert_array_almost_equal(y_sing, y_full) assert_array_almost_equal(zf_full, zf_sing) # lfilter does not prepend ones assert_raises(ValueError, lfilter, b, a, x, -1, np.ones(zi_size)) def test_scalar_a(self): # a can be a scalar. x = self.generate(6) b = self.convert_dtype([1, 0, -1]) a = self.convert_dtype([1]) y_r = self.convert_dtype([0, 1, 2, 2, 2, 2]) y = lfilter(b, a[0], x) assert_array_almost_equal(y, y_r) def test_zi_some_singleton_dims(self): # lfilter doesn't really broadcast (no prepending of 1's). But does # do singleton expansion if x and zi have the same ndim. This was # broken only if a subset of the axes were singletons (gh-4681). 
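# --- Illustrative aside (not part of the original test suite) ---
# A minimal sketch of the singleton-expansion behaviour the zi tests above
# exercise: lfilter requires zi.ndim == x.ndim, but length-1 axes of zi are
# expanded to match x.  The underscore-prefixed helper name is made up for
# illustration (and keeps pytest from collecting it).
import numpy as np
from scipy.signal import butter, lfilter

def _demo_zi_singleton_expansion():
    b, a = butter(4, 0.25)
    x = np.random.default_rng(0).standard_normal((3, 5, 20))
    n_state = max(len(a), len(b)) - 1
    zi_full = np.ones((3, 5, n_state))
    zi_sing = np.ones((1, 1, n_state))  # singleton axes expand against x
    y_full, _ = lfilter(b, a, x, zi=zi_full)
    y_sing, _ = lfilter(b, a, x, zi=zi_sing)
    assert np.allclose(y_full, y_sing)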
x = self.convert_dtype(np.zeros((3,2,5), 'l')) b = self.convert_dtype(np.ones(5, 'l')) a = self.convert_dtype(np.array([1,0,0])) zi = np.ones((3,1,4), 'l') zi[1,:,:] *= 2 zi[2,:,:] *= 3 zi = self.convert_dtype(zi) zf_expected = self.convert_dtype(np.zeros((3,2,4), 'l')) y_expected = np.zeros((3,2,5), 'l') y_expected[:,:,:4] = [[[1]], [[2]], [[3]]] y_expected = self.convert_dtype(y_expected) # IIR y_iir, zf_iir = lfilter(b, a, x, -1, zi) assert_array_almost_equal(y_iir, y_expected) assert_array_almost_equal(zf_iir, zf_expected) # FIR y_fir, zf_fir = lfilter(b, a[0], x, -1, zi) assert_array_almost_equal(y_fir, y_expected) assert_array_almost_equal(zf_fir, zf_expected) def base_bad_size_zi(self, b, a, x, axis, zi): b = self.convert_dtype(b) a = self.convert_dtype(a) x = self.convert_dtype(x) zi = self.convert_dtype(zi) assert_raises(ValueError, lfilter, b, a, x, axis, zi) def test_bad_size_zi(self): # rank 1 x1 = np.arange(6) self.base_bad_size_zi([1], [1], x1, -1, [1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1]) self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]]) self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1]) self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]]) self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2]) self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3]) # rank 2 x2 = np.arange(12).reshape((4,3)) # for axis=0 zi.shape should == (max(len(a),len(b))-1, 3) self.base_bad_size_zi([1], [1], x2, 0, [0]) # for each of these there are 5 cases tested (in this order): # 1. not deep enough, right # elements # 2. too deep, right # elements # 3. right depth, right # elements, transposed # 4. right depth, too few elements # 5. 
right depth, too many elements self.base_bad_size_zi([1, 1], [1], x2, 0, [0,1,2]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1]]) self.base_bad_size_zi([1, 1], [1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [0,1,2]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0,1,2]]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1]]) self.base_bad_size_zi([1], [1, 1], x2, 0, [[0,1,2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0,1,2,3,4,5]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0,1,2],[3,4,5]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1],[2,3]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0,1,2,3],[4,5,6,7]]) # for axis=1 zi.shape should == (4, max(len(a),len(b))-1) self.base_bad_size_zi([1], [1], x2, 1, [0]) self.base_bad_size_zi([1, 1], [1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1, 1], [1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [0,1,2,3]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0],[1],[2],[3]]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2]]) self.base_bad_size_zi([1], [1, 1], x2, 1, [[0],[1],[2],[3],[4]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0,1,2,3,4,5,6,7]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[[0,1],[2,3],[4,5],[6,7]]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1,2,3],[4,5,6,7]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5]]) self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0,1],[2,3],[4,5],[6,7],[8,9]]) def test_empty_zi(self): # Regression test for #880: empty array for zi crashes. 
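# --- Illustrative aside (not part of the original test suite) ---
# The "bad zi size" checks above all follow one rule: along the filtered
# axis, zi must have length max(len(a), len(b)) - 1, and every other axis
# must match x.  A small hypothetical helper that states the rule:
import numpy as np

def _expected_zi_shape(x, b, a, axis=-1):
    shape = list(np.shape(x))
    shape[axis] = max(len(b), len(a)) - 1
    return tuple(shape)

# e.g. for x2 = np.arange(12).reshape(4, 3), b=[1, 1], a=[1]:
#   _expected_zi_shape(x2, [1, 1], [1], axis=0) -> (1, 3)
#   _expected_zi_shape(x2, [1, 1], [1], axis=1) -> (4, 1)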
x = self.generate((5,)) a = self.convert_dtype([1]) b = self.convert_dtype([1]) zi = self.convert_dtype([]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, x) assert_equal(zf.dtype, self.dtype) assert_equal(zf.size, 0) def test_lfiltic_bad_zi(self): # Regression test for #3699: bad initial conditions a = self.convert_dtype([1]) b = self.convert_dtype([1]) # "y" sets the datatype of zi, so it truncates if int zi = lfiltic(b, a, [1., 0]) zi_1 = lfiltic(b, a, [1, 0]) zi_2 = lfiltic(b, a, [True, False]) assert_array_equal(zi, zi_1) assert_array_equal(zi, zi_2) def test_short_x_FIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([7, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_short_x_IIR(self): # regression test for #5116 # x shorter than b, with non None zi fails a = self.convert_dtype([1, 1]) b = self.convert_dtype([1, 0, -1]) zi = self.convert_dtype([2, 7]) x = self.convert_dtype([72]) ye = self.convert_dtype([74]) zfe = self.convert_dtype([-67, -72]) y, zf = lfilter(b, a, x, zi=zi) assert_array_almost_equal(y, ye) assert_array_almost_equal(zf, zfe) def test_do_not_modify_a_b_IIR(self): x = self.generate((6,)) b = self.convert_dtype([1, -1]) b0 = b.copy() a = self.convert_dtype([0.5, -0.5]) a0 = a.copy() y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) def test_do_not_modify_a_b_FIR(self): x = self.generate((6,)) b = self.convert_dtype([1, 0, 1]) b0 = b.copy() a = self.convert_dtype([2]) a0 = a.copy() y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.]) y_f = lfilter(b, a, x) assert_array_almost_equal(y_f, y_r) assert_equal(b, b0) assert_equal(a, a0) class TestLinearFilterFloat32(_TestLinearFilter): dtype = np.dtype('f') class TestLinearFilterFloat64(_TestLinearFilter): dtype = np.dtype('d') class TestLinearFilterFloatExtended(_TestLinearFilter): dtype = np.dtype('g') class TestLinearFilterComplex64(_TestLinearFilter): dtype = np.dtype('F') class TestLinearFilterComplex128(_TestLinearFilter): dtype = np.dtype('D') class TestLinearFilterComplexExtended(_TestLinearFilter): dtype = np.dtype('G') class TestLinearFilterDecimal(_TestLinearFilter): dtype = np.dtype('O') def type(self, x): return Decimal(str(x)) class TestLinearFilterObject(_TestLinearFilter): dtype = np.dtype('O') type = float def test_lfilter_bad_object(): # lfilter: object arrays with non-numeric objects raise TypeError. # Regression test for ticket #1452. 
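# --- Illustrative aside (not part of the original test suite) ---
# Sketch of the lfiltic usage the checks above rely on: initial conditions
# rebuilt from past outputs/inputs (passed newest first) let a filter resume
# exactly where it stopped.  The demo name is hypothetical.
import numpy as np
from scipy.signal import butter, lfilter, lfiltic

def _demo_lfiltic_restart():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(100)
    b, a = butter(3, 0.3)
    mid = 60
    y_head = lfilter(b, a, x[:mid])
    zi = lfiltic(b, a, y_head[::-1], x[:mid][::-1])  # newest samples first
    y_tail, _ = lfilter(b, a, x[mid:], zi=zi)
    assert np.allclose(np.concatenate([y_head, y_tail]), lfilter(b, a, x))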
if hasattr(sys, 'abiflags') and 'd' in sys.abiflags: pytest.skip('test is flaky when run with python3-dbg') assert_raises(TypeError, lfilter, [1.0], [1.0], [1.0, None, 2.0]) assert_raises(TypeError, lfilter, [1.0], [None], [1.0, 2.0, 3.0]) assert_raises(TypeError, lfilter, [None], [1.0], [1.0, 2.0, 3.0]) def test_lfilter_notimplemented_input(): # Should not crash, gh-7991 assert_raises(NotImplementedError, lfilter, [2,3], [4,5], [1,2,3,4,5]) @pytest.mark.parametrize('dt', [np.ubyte, np.byte, np.ushort, np.short, np.uint, int, np.ulonglong, np.ulonglong, np.float32, np.float64, np.longdouble, Decimal]) class TestCorrelateReal: def _setup_rank1(self, dt): a = np.linspace(0, 3, 4).astype(dt) b = np.linspace(1, 2, 2).astype(dt) y_r = np.array([0, 2, 5, 8, 3]).astype(dt) return a, b, y_r def equal_tolerance(self, res_dt): # default value of keyword decimal = 6 try: dt_info = np.finfo(res_dt) if hasattr(dt_info, 'resolution'): decimal = int(-0.5*np.log10(dt_info.resolution)) except Exception: pass return decimal def equal_tolerance_fft(self, res_dt): # FFT implementations convert longdouble arguments down to # double so don't expect better precision, see gh-9520 if res_dt == np.longdouble: return self.equal_tolerance(np.double) else: return self.equal_tolerance(res_dt) def test_method(self, dt): if dt == Decimal: method = choose_conv_method([Decimal(4)], [Decimal(3)]) assert_equal(method, 'direct') else: a, b, y_r = self._setup_rank3(dt) y_fft = correlate(a, b, method='fft') y_direct = correlate(a, b, method='direct') assert_array_almost_equal(y_r, y_fft, decimal=self.equal_tolerance_fft(y_fft.dtype)) assert_array_almost_equal(y_r, y_direct, decimal=self.equal_tolerance(y_direct.dtype)) assert_equal(y_fft.dtype, dt) assert_equal(y_direct.dtype, dt) def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r[1:4]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[1:4][::-1]) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r[:-1]) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) def _setup_rank3(self, dt): a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype( dt) b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype( dt) y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.], [46., 432., 1062., 1840., 2672., 1698., 864., 266.], [134., 736., 1662., 2768., 3920., 2418., 1168., 314.], [260., 952., 1932., 3056., 4208., 2580., 1240., 332.], [202., 664., 1290., 1984., 2688., 1590., 712., 150.], [114., 344., 642., 960., 1280., 726., 296., 38.]], [[23., 400., 1035., 1832., 2696., 1737., 904., 293.], [134., 920., 2166., 3680., 5280., 3306., 1640., 474.], [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.], [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.], [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.], [241., 700., 1281., 1888., 2496., 1383., 532., 39.]], [[22., 214., 528., 916., 1332., 846., 430., 132.], [86., 484., 1098., 1832., 2600., 1602., 772., 206.], [188., 802., 1698., 2732., 3788., 2256., 1018., 218.], [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.], [230., 692., 1290., 1928., 2568., 1458., 596., 78.], [126., 354., 636., 924., 1212., 654., 234., 0.]]], dtype=dt) return a, b, y_r def 
test_rank3_valid(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5]) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, "valid") assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5][::-1, ::-1, ::-1]) assert_equal(y.dtype, dt) def test_rank3_same(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b, "same") assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2]) assert_equal(y.dtype, dt) def test_rank3_all(self, dt): a, b, y_r = self._setup_rank3(dt) y = correlate(a, b) assert_array_almost_equal(y, y_r) assert_equal(y.dtype, dt) class TestCorrelate: # Tests that don't depend on dtype def test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'}) def test_invalid_params(self): a = [3, 4, 5] b = [1, 2, 3] assert_raises(ValueError, correlate, a, b, mode='spam') assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft') assert_raises(ValueError, correlate, a, b, mode='ham', method='direct') assert_raises(ValueError, correlate, a, b, mode='full', method='bacon') assert_raises(ValueError, correlate, a, b, mode='same', method='bacon') def test_mismatched_dims(self): # Input arrays should have the same number of dimensions assert_raises(ValueError, correlate, [1], 2, method='direct') assert_raises(ValueError, correlate, 1, [2], method='direct') assert_raises(ValueError, correlate, [1], 2, method='fft') assert_raises(ValueError, correlate, 1, [2], method='fft') assert_raises(ValueError, correlate, [1], [[2]]) assert_raises(ValueError, correlate, [3], 2) def test_numpy_fastpath(self): a = [1, 2, 3] b = [4, 5] assert_allclose(correlate(a, b, mode='same'), [5, 14, 23]) a = [1, 2, 3] b = [4, 5, 6] assert_allclose(correlate(a, b, mode='same'), [17, 32, 23]) assert_allclose(correlate(a, b, mode='full'), [6, 17, 32, 23, 12]) assert_allclose(correlate(a, b, mode='valid'), [32]) @pytest.mark.parametrize("mode", ["valid", "same", "full"]) @pytest.mark.parametrize("behind", [True, False]) @pytest.mark.parametrize("input_size", [100, 101, 1000, 1001, 10000, 10001]) def test_correlation_lags(mode, behind, input_size): # generate random data rng = np.random.RandomState(0) in1 = rng.standard_normal(input_size) offset = int(input_size/10) # generate offset version of array to correlate with if behind: # y is behind x in2 = np.concatenate([rng.standard_normal(offset), in1]) expected = -offset else: # y is ahead of x in2 = in1[offset:] expected = offset # cross correlate, returning lag information correlation = correlate(in1, in2, mode=mode) lags = correlation_lags(in1.size, in2.size, mode=mode) # identify the peak lag_index = np.argmax(correlation) # Check as expected assert_equal(lags[lag_index], expected) # Correlation and lags shape should match assert_equal(lags.shape, correlation.shape) @pytest.mark.parametrize('dt', [np.csingle, np.cdouble, np.clongdouble]) class TestCorrelateComplex: # The decimal precision to be used for comparing results. # This value will be passed as the 'decimal' keyword argument of # assert_array_almost_equal(). 
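# --- Illustrative aside (not part of the original test suite) ---
# Sketch of the lag bookkeeping behind test_correlation_lags above: the
# argmax of the cross-correlation maps to a signed sample lag.
import numpy as np
from scipy.signal import correlate, correlation_lags

def _demo_correlation_lags():
    rng = np.random.default_rng(0)
    x = rng.standard_normal(500)
    y = np.concatenate([np.zeros(20), x])      # y trails x by 20 samples
    corr = correlate(x, y, mode="full")
    lags = correlation_lags(x.size, y.size, mode="full")
    return lags[np.argmax(corr)]               # expected: -20, as in the test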
# Since correlate may chose to use FFT method which converts # longdoubles to doubles internally don't expect better precision # for longdouble than for double (see gh-9520). def decimal(self, dt): if dt == np.clongdouble: dt = np.cdouble return int(2 * np.finfo(dt).precision / 3) def _setup_rank1(self, dt, mode): np.random.seed(9) a = np.random.randn(10).astype(dt) a += 1j * np.random.randn(10).astype(dt) b = np.random.randn(8).astype(dt) b += 1j * np.random.randn(8).astype(dt) y_r = (correlate(a.real, b.real, mode=mode) + correlate(a.imag, b.imag, mode=mode)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag, mode=mode) + correlate(a.imag, b.real, mode=mode)) return a, b, y_r def test_rank1_valid(self, dt): a, b, y_r = self._setup_rank1(dt, 'valid') y = correlate(a, b, 'valid') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) # See gh-5897 y = correlate(b, a, 'valid') assert_array_almost_equal(y, y_r[::-1].conj(), decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_same(self, dt): a, b, y_r = self._setup_rank1(dt, 'same') y = correlate(a, b, 'same') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_rank1_full(self, dt): a, b, y_r = self._setup_rank1(dt, 'full') y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt)) assert_equal(y.dtype, dt) def test_swap_full(self, dt): d = np.array([0.+0.j, 1.+1.j, 2.+2.j], dtype=dt) k = np.array([1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j], dtype=dt) y = correlate(d, k) assert_equal(y, [0.+0.j, 10.-2.j, 28.-6.j, 22.-6.j, 16.-6.j, 8.-4.j]) def test_swap_same(self, dt): d = [0.+0.j, 1.+1.j, 2.+2.j] k = [1.+3.j, 2.+4.j, 3.+5.j, 4.+6.j] y = correlate(d, k, mode="same") assert_equal(y, [10.-2.j, 28.-6.j, 22.-6.j]) def test_rank3(self, dt): a = np.random.randn(10, 8, 6).astype(dt) a += 1j * np.random.randn(10, 8, 6).astype(dt) b = np.random.randn(8, 6, 4).astype(dt) b += 1j * np.random.randn(8, 6, 4).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) def test_rank0(self, dt): a = np.array(np.random.randn()).astype(dt) a += 1j * np.array(np.random.randn()).astype(dt) b = np.array(np.random.randn()).astype(dt) b += 1j * np.array(np.random.randn()).astype(dt) y_r = (correlate(a.real, b.real) + correlate(a.imag, b.imag)).astype(dt) y_r += 1j * np.array(-correlate(a.real, b.imag) + correlate(a.imag, b.real)) y = correlate(a, b, 'full') assert_array_almost_equal(y, y_r, decimal=self.decimal(dt) - 1) assert_equal(y.dtype, dt) assert_equal(correlate([1], [2j]), correlate(1, 2j)) assert_equal(correlate([2j], [3j]), correlate(2j, 3j)) assert_equal(correlate([3j], [4]), correlate(3j, 4)) class TestCorrelate2d: def test_consistency_correlate_funcs(self): # Compare np.correlate, signal.correlate, signal.correlate2d a = np.arange(5) b = np.array([3.2, 1.4, 3]) for mode in ['full', 'valid', 'same']: assert_almost_equal(np.correlate(a, b, mode=mode), signal.correlate(a, b, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([a], [b], mode=mode)), signal.correlate(a, b, mode=mode)) # See gh-5897 if mode == 'valid': assert_almost_equal(np.correlate(b, a, mode=mode), signal.correlate(b, a, mode=mode)) assert_almost_equal(np.squeeze(signal.correlate2d([b], [a], mode=mode)), signal.correlate(b, a, mode=mode)) def 
test_invalid_shapes(self): # By "invalid," we mean that no one # array has dimensions that are all at # least as large as the corresponding # dimensions of the other array. This # setup should throw a ValueError. a = np.arange(1, 7).reshape((2, 3)) b = np.arange(-6, 0).reshape((3, 2)) assert_raises(ValueError, signal.correlate2d, *(a, b), **{'mode': 'valid'}) assert_raises(ValueError, signal.correlate2d, *(b, a), **{'mode': 'valid'}) def test_complex_input(self): assert_equal(signal.correlate2d([[1]], [[2j]]), -2j) assert_equal(signal.correlate2d([[2j]], [[3j]]), 6) assert_equal(signal.correlate2d([[3j]], [[4]]), 12j) class TestLFilterZI: def test_basic(self): a = np.array([1.0, -1.0, 0.5]) b = np.array([1.0, 0.0, 2.0]) zi_expected = np.array([5.0, -1.0]) zi = lfilter_zi(b, a) assert_array_almost_equal(zi, zi_expected) def test_scale_invariance(self): # Regression test. There was a bug in which b was not correctly # rescaled when a[0] was nonzero. b = np.array([2, 8, 5]) a = np.array([1, 1, 8]) zi1 = lfilter_zi(b, a) zi2 = lfilter_zi(2*b, 2*a) assert_allclose(zi2, zi1, rtol=1e-12) @pytest.mark.parametrize('dtype', [np.float32, np.float64]) def test_types(self, dtype): b = np.zeros((8), dtype=dtype) a = np.array([1], dtype=dtype) assert_equal(np.real(signal.lfilter_zi(b, a)).dtype, dtype) class TestFiltFilt: filtfilt_kind = 'tf' def filtfilt(self, zpk, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None): if self.filtfilt_kind == 'tf': b, a = zpk2tf(*zpk) return filtfilt(b, a, x, axis, padtype, padlen, method, irlen) elif self.filtfilt_kind == 'sos': sos = zpk2sos(*zpk) return sosfiltfilt(sos, x, axis, padtype, padlen) def test_basic(self): zpk = tf2zpk([1, 2, 3], [1, 2, 3]) out = self.filtfilt(zpk, np.arange(12)) assert_allclose(out, arange(12), atol=5.28e-11) def test_sine(self): rate = 2000 t = np.linspace(0, 1.0, rate + 1) # A signal with low frequency and a high frequency. xlow = np.sin(5 * 2 * np.pi * t) xhigh = np.sin(250 * 2 * np.pi * t) x = xlow + xhigh zpk = butter(8, 0.125, output='zpk') # r is the magnitude of the largest pole. r = np.abs(zpk[1]).max() eps = 1e-5 # n estimates the number of steps for the # transient to decay by a factor of eps. n = int(np.ceil(np.log(eps) / np.log(r))) # High order lowpass filter... y = self.filtfilt(zpk, x, padlen=n) # Result should be just xlow. err = np.abs(y - xlow).max() assert_(err < 1e-4) # A 2D case. x2d = np.vstack([xlow, xlow + xhigh]) y2d = self.filtfilt(zpk, x2d, padlen=n, axis=1) assert_equal(y2d.shape, x2d.shape) err = np.abs(y2d - xlow).max() assert_(err < 1e-4) # Use the previous result to check the use of the axis keyword. # (Regression test for ticket #1620) y2dt = self.filtfilt(zpk, x2d.T, padlen=n, axis=0) assert_equal(y2d, y2dt.T) def test_axis(self): # Test the 'axis' keyword on a 3D array. 
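# --- Illustrative aside (not part of the original test suite) ---
# The canonical lfilter_zi usage behind TestLFilterZI above: scale the
# returned state by the first sample so a step input shows no startup
# transient.  This mirrors the lfilter_zi docstring example.
import numpy as np
from scipy.signal import butter, lfilter, lfilter_zi

def _demo_lfilter_zi_step():
    b, a = butter(5, 0.25)
    x = np.ones(50)
    zi = lfilter_zi(b, a)
    y, _ = lfilter(b, a, x, zi=zi * x[0])
    assert np.allclose(y, 1.0)   # at steady state from the very first sample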
x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12) zpk = butter(3, 0.125, output='zpk') y0 = self.filtfilt(zpk, x, padlen=0, axis=0) y1 = self.filtfilt(zpk, np.swapaxes(x, 0, 1), padlen=0, axis=1) assert_array_equal(y0, np.swapaxes(y1, 0, 1)) y2 = self.filtfilt(zpk, np.swapaxes(x, 0, 2), padlen=0, axis=2) assert_array_equal(y0, np.swapaxes(y2, 0, 2)) def test_acoeff(self): if self.filtfilt_kind != 'tf': return # only necessary for TF # test for 'a' coefficient as single number out = signal.filtfilt([.5, .5], 1, np.arange(10)) assert_allclose(out, np.arange(10), rtol=1e-14, atol=1e-14) def test_gust_simple(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The input array has length 2. The exact solution for this case # was computed "by hand". x = np.array([1.0, 2.0]) b = np.array([0.5]) a = np.array([1.0, -0.5]) y, z1, z2 = _filtfilt_gust(b, a, x) assert_allclose([z1[0], z2[0]], [0.3*x[0] + 0.2*x[1], 0.2*x[0] + 0.3*x[1]]) assert_allclose(y, [z1[0] + 0.25*z2[0] + 0.25*x[0] + 0.125*x[1], 0.25*z1[0] + z2[0] + 0.125*x[0] + 0.25*x[1]]) def test_gust_scalars(self): if self.filtfilt_kind != 'tf': pytest.skip('gust only implemented for TF systems') # The filter coefficients are both scalars, so the filter simply # multiplies its input by b/a. When it is used in filtfilt, the # factor is (b/a)**2. x = np.arange(12) b = 3.0 a = 2.0 y = filtfilt(b, a, x, method="gust") expected = (b/a)**2 * x assert_allclose(y, expected) class TestSOSFiltFilt(TestFiltFilt): filtfilt_kind = 'sos' def test_equivalence(self): """Test equivalence between sosfiltfilt and filtfilt""" x = np.random.RandomState(0).randn(1000) for order in range(1, 6): zpk = signal.butter(order, 0.35, output='zpk') b, a = zpk2tf(*zpk) sos = zpk2sos(*zpk) y = filtfilt(b, a, x) y_sos = sosfiltfilt(sos, x) assert_allclose(y, y_sos, atol=1e-12, err_msg='order=%s' % order) def filtfilt_gust_opt(b, a, x): """ An alternative implementation of filtfilt with Gustafsson edges. This function computes the same result as `scipy.signal._signaltools._filtfilt_gust`, but only 1-d arrays are accepted. The problem is solved using `fmin` from `scipy.optimize`. `_filtfilt_gust` is significanly faster than this implementation. """ def filtfilt_gust_opt_func(ics, b, a, x): """Objective function used in filtfilt_gust_opt.""" m = max(len(a), len(b)) - 1 z0f = ics[:m] z0b = ics[m:] y_f = lfilter(b, a, x, zi=z0f)[0] y_fb = lfilter(b, a, y_f[::-1], zi=z0b)[0][::-1] y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y_bf = lfilter(b, a, y_b, zi=z0f)[0] value = np.sum((y_fb - y_bf)**2) return value m = max(len(a), len(b)) - 1 zi = lfilter_zi(b, a) ics = np.concatenate((x[:m].mean()*zi, x[-m:].mean()*zi)) result = fmin(filtfilt_gust_opt_func, ics, args=(b, a, x), xtol=1e-10, ftol=1e-12, maxfun=10000, maxiter=10000, full_output=True, disp=False) opt, fopt, niter, funcalls, warnflag = result if warnflag > 0: raise RuntimeError("minimization failed in filtfilt_gust_opt: " "warnflag=%d" % warnflag) z0f = opt[:m] z0b = opt[m:] # Apply the forward-backward filter using the computed initial # conditions. y_b = lfilter(b, a, x[::-1], zi=z0b)[0][::-1] y = lfilter(b, a, y_b, zi=z0f)[0] return y, z0f, z0b def check_filtfilt_gust(b, a, shape, axis, irlen=None): # Generate x, the data to be filtered. np.random.seed(123) x = np.random.randn(*shape) # Apply filtfilt to x. This is the main calculation to be checked. y = filtfilt(b, a, x, axis=axis, method="gust", irlen=irlen) # Also call the private function so we can test the ICs. 
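# --- Illustrative aside (not part of the original test suite) ---
# The two edge-handling strategies compared throughout the Gustafsson tests:
# both are zero-phase, they differ only in how the signal ends are handled.
import numpy as np
from scipy.signal import butter, filtfilt

def _demo_filtfilt_edge_methods():
    b, a = butter(4, 0.1)
    x = 0.1 + np.sin(np.linspace(0, 10, 200))
    y_pad = filtfilt(b, a, x, method="pad")    # default odd-reflection padding
    y_gust = filtfilt(b, a, x, method="gust")  # Gustafsson initial conditions
    return y_pad, y_gust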
yg, zg1, zg2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen) # filtfilt_gust_opt is an independent implementation that gives the # expected result, but it only handles 1-D arrays, so use some looping # and reshaping shenanigans to create the expected output arrays. xx = np.swapaxes(x, axis, -1) out_shape = xx.shape[:-1] yo = np.empty_like(xx) m = max(len(a), len(b)) - 1 zo1 = np.empty(out_shape + (m,)) zo2 = np.empty(out_shape + (m,)) for indx in product(*[range(d) for d in out_shape]): yo[indx], zo1[indx], zo2[indx] = filtfilt_gust_opt(b, a, xx[indx]) yo = np.swapaxes(yo, -1, axis) zo1 = np.swapaxes(zo1, -1, axis) zo2 = np.swapaxes(zo2, -1, axis) assert_allclose(y, yo, rtol=1e-8, atol=1e-9) assert_allclose(yg, yo, rtol=1e-8, atol=1e-9) assert_allclose(zg1, zo1, rtol=1e-8, atol=1e-9) assert_allclose(zg2, zo2, rtol=1e-8, atol=1e-9) def test_choose_conv_method(): for mode in ['valid', 'same', 'full']: for ndim in [1, 2]: n, k, true_method = 8, 6, 'direct' x = np.random.randn(*((n,) * ndim)) h = np.random.randn(*((k,) * ndim)) method = choose_conv_method(x, h, mode=mode) assert_equal(method, true_method) method_try, times = choose_conv_method(x, h, mode=mode, measure=True) assert_(method_try in {'fft', 'direct'}) assert_(type(times) is dict) assert_('fft' in times.keys() and 'direct' in times.keys()) n = 10 for not_fft_conv_supp in ["complex256", "complex192"]: if hasattr(np, not_fft_conv_supp): x = np.ones(n, dtype=not_fft_conv_supp) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = np.array([2**51], dtype=np.int64) h = x.copy() assert_equal(choose_conv_method(x, h, mode=mode), 'direct') x = [Decimal(3), Decimal(2)] h = [Decimal(1), Decimal(4)] assert_equal(choose_conv_method(x, h, mode=mode), 'direct') def test_filtfilt_gust(): # Design a filter. z, p, k = signal.ellip(3, 0.01, 120, 0.0875, output='zpk') # Find the approximate impulse response length of the filter. eps = 1e-10 r = np.max(np.abs(p)) approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r))) np.random.seed(123) b, a = zpk2tf(z, p, k) for irlen in [None, approx_impulse_len]: signal_len = 5 * approx_impulse_len # 1-d test case check_filtfilt_gust(b, a, (signal_len,), 0, irlen) # 3-d test case; test each axis. for axis in range(3): shape = [2, 2, 2] shape[axis] = signal_len check_filtfilt_gust(b, a, shape, axis, irlen) # Test case with length less than 2*approx_impulse_len. # In this case, `filtfilt_gust` should behave the same as if # `irlen=None` was given. length = 2*approx_impulse_len - 50 check_filtfilt_gust(b, a, (length,), 0, approx_impulse_len) class TestDecimate: def test_bad_args(self): x = np.arange(12) assert_raises(TypeError, signal.decimate, x, q=0.5, n=1) assert_raises(TypeError, signal.decimate, x, q=2, n=0.5) def test_basic_IIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='iir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_basic_FIR(self): x = np.arange(12) y = signal.decimate(x, 2, n=1, ftype='fir', zero_phase=False).round() assert_array_equal(y, x[::2]) def test_shape(self): # Regression test for ticket #1480. 
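# --- Illustrative aside (not part of the original test suite) ---
# What test_choose_conv_method above exercises: the helper picks 'direct' or
# 'fft' for a given pair of inputs (and with measure=True also returns the
# measured timings), and the choice can be fed straight back into convolve.
import numpy as np
from scipy.signal import choose_conv_method, convolve

def _demo_choose_conv_method():
    rng = np.random.default_rng(0)
    x, h = rng.standard_normal(10_000), rng.standard_normal(100)
    method = choose_conv_method(x, h, mode="full")   # 'direct' or 'fft'
    return convolve(x, h, mode="full", method=method)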
z = np.zeros((30, 30)) d0 = signal.decimate(z, 2, axis=0, zero_phase=False) assert_equal(d0.shape, (15, 30)) d1 = signal.decimate(z, 2, axis=1, zero_phase=False) assert_equal(d1.shape, (30, 15)) def test_phaseshift_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=False) def test_zero_phase_FIR(self): with suppress_warnings() as sup: sup.filter(BadCoefficients, "Badly conditioned filter") self._test_phaseshift(method='fir', zero_phase=True) def test_phaseshift_IIR(self): self._test_phaseshift(method='iir', zero_phase=False) def test_zero_phase_IIR(self): self._test_phaseshift(method='iir', zero_phase=True) def _test_phaseshift(self, method, zero_phase): rate = 120 rates_to = [15, 20, 30, 40] # q = 8, 6, 4, 3 t_tot = int(100) # Need to let antialiasing filters settle t = np.arange(rate*t_tot+1) / float(rate) # Sinusoids at 0.8*nyquist, windowed to avoid edge artifacts freqs = np.array(rates_to) * 0.8 / 2 d = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t) * signal.windows.tukey(t.size, 0.1)) for rate_to in rates_to: q = rate // rate_to t_to = np.arange(rate_to*t_tot+1) / float(rate_to) d_tos = (np.exp(1j * 2 * np.pi * freqs[:, np.newaxis] * t_to) * signal.windows.tukey(t_to.size, 0.1)) # Set up downsampling filters, match v0.17 defaults if method == 'fir': n = 30 system = signal.dlti(signal.firwin(n + 1, 1. / q, window='hamming'), 1.) elif method == 'iir': n = 8 wc = 0.8*np.pi/q system = signal.dlti(*signal.cheby1(n, 0.05, wc/np.pi)) # Calculate expected phase response, as unit complex vector if zero_phase is False: _, h_resps = signal.freqz(system.num, system.den, freqs/rate*2*np.pi) h_resps /= np.abs(h_resps) else: h_resps = np.ones_like(freqs) y_resamps = signal.decimate(d.real, q, n, ftype=system, zero_phase=zero_phase) # Get phase from complex inner product, like CSD h_resamps = np.sum(d_tos.conj() * y_resamps, axis=-1) h_resamps /= np.abs(h_resamps) subnyq = freqs < 0.5*rate_to # Complex vectors should be aligned, only compare below nyquist assert_allclose(np.angle(h_resps.conj()*h_resamps)[subnyq], 0, atol=1e-3, rtol=1e-3) def test_auto_n(self): # Test that our value of n is a reasonable choice (depends on # the downsampling factor) sfreq = 100. n = 1000 t = np.arange(n) / sfreq # will alias for decimations (>= 15) x = np.sqrt(2. / n) * np.sin(2 * np.pi * (sfreq / 30.) * t) assert_allclose(np.linalg.norm(x), 1., rtol=1e-3) x_out = signal.decimate(x, 30, ftype='fir') assert_array_less(np.linalg.norm(x_out), 0.01) def test_long_float32(self): # regression: gh-15072. 
With 32-bit float and either lfilter # or filtfilt, this is numerically unstable x = signal.decimate(np.ones(10_000, dtype=np.float32), 10) assert not any(np.isnan(x)) def test_float16_upcast(self): # float16 must be upcast to float64 x = signal.decimate(np.ones(100, dtype=np.float16), 10) assert x.dtype.type == np.float64 def test_complex_iir_dlti(self): # regression: gh-17845 # centre frequency for filter [Hz] fcentre = 50 # filter passband width [Hz] fwidth = 5 # sample rate [Hz] fs = 1e3 z, p, k = signal.butter(2, 2*np.pi*fwidth/2, output='zpk', fs=fs) z = z.astype(complex) * np.exp(2j * np.pi * fcentre/fs) p = p.astype(complex) * np.exp(2j * np.pi * fcentre/fs) system = signal.dlti(z, p, k) t = np.arange(200) / fs # input u = (np.exp(2j * np.pi * fcentre * t) + 0.5 * np.exp(-2j * np.pi * fcentre * t)) ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) ynzpref = signal.lfilter(*signal.zpk2tf(z, p, k), u)[::2] assert_equal(ynzp, ynzpref) yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) yzpref = signal.filtfilt(*signal.zpk2tf(z, p, k), u)[::2] assert_allclose(yzp, yzpref, rtol=1e-10, atol=1e-13) def test_complex_fir_dlti(self): # centre frequency for filter [Hz] fcentre = 50 # filter passband width [Hz] fwidth = 5 # sample rate [Hz] fs = 1e3 numtaps = 20 # FIR filter about 0Hz bbase = signal.firwin(numtaps, fwidth/2, fs=fs) # rotate these to desired frequency zbase = np.roots(bbase) zrot = zbase * np.exp(2j * np.pi * fcentre/fs) # FIR filter about 50Hz, maintaining passband gain of 0dB bz = bbase[0] * np.poly(zrot) system = signal.dlti(bz, 1) t = np.arange(200) / fs # input u = (np.exp(2j * np.pi * fcentre * t) + 0.5 * np.exp(-2j * np.pi * fcentre * t)) ynzp = signal.decimate(u, 2, ftype=system, zero_phase=False) ynzpref = signal.upfirdn(bz, u, up=1, down=2)[:100] assert_equal(ynzp, ynzpref) yzp = signal.decimate(u, 2, ftype=system, zero_phase=True) yzpref = signal.resample_poly(u, 1, 2, window=bz) assert_equal(yzp, yzpref) class TestHilbert: def test_bad_args(self): x = np.array([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = np.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self): # test cases by Ariel Rokem decimal = 14 pi = np.pi t = np.arange(0, 2 * pi, pi / 256) a0 = np.sin(t) a1 = np.cos(t) a2 = np.sin(2 * t) a3 = np.cos(2 * t) a = np.vstack([a0, a1, a2, a3]) h = hilbert(a) h_abs = np.abs(h) h_angle = np.angle(h) h_real = np.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, np.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], np.arange(-pi / 2, pi / 2, pi / 256), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], np.arange(0, pi, pi / 256), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], np.arange(-pi / 2, pi / 2, pi / 128), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], np.arange(0, pi, pi / 128), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(h[1].imag, a0, decimal) def test_hilbert_axisN(self): # tests for axis and N arguments a = np.arange(18).reshape(3, 6) # test axis aa = hilbert(a, axis=-1) assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d 
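# --- Illustrative aside (not part of the original test suite) ---
# The analytic-signal property behind test_hilbert_theoretical above: for a
# real tone spanning whole periods, hilbert() keeps the real part and its
# magnitude traces the (unit) envelope.
import numpy as np
from scipy.signal import hilbert

def _demo_hilbert_envelope():
    t = np.linspace(0, 1, 400, endpoint=False)
    x = np.cos(2 * np.pi * 5 * t)                 # exactly 5 whole cycles
    analytic = hilbert(x)
    assert np.allclose(analytic.real, x)
    assert np.allclose(np.abs(analytic), 1.0, atol=1e-2)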
assert_almost_equal(hilbert(a[0]), aa[0], 14) # test N aan = hilbert(a, N=20, axis=-1) assert_equal(aan.shape, [3, 20]) assert_equal(hilbert(a.T, N=20, axis=0).shape, [20, 3]) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) assert_almost_equal(aan[0], a0hilb, 14, 'N regression') @pytest.mark.parametrize('dtype', [np.float32, np.float64]) def test_hilbert_types(self, dtype): in_typed = np.zeros(8, dtype=dtype) assert_equal(np.real(signal.hilbert(in_typed)).dtype, dtype) class TestHilbert2: def test_bad_args(self): # x must be real. x = np.array([[1.0 + 0.0j]]) assert_raises(ValueError, hilbert2, x) # x must be rank 2. x = np.arange(24).reshape(2, 3, 4) assert_raises(ValueError, hilbert2, x) # Bad value for N. x = np.arange(16).reshape(4, 4) assert_raises(ValueError, hilbert2, x, N=0) assert_raises(ValueError, hilbert2, x, N=(2, 0)) assert_raises(ValueError, hilbert2, x, N=(2,)) @pytest.mark.parametrize('dtype', [np.float32, np.float64]) def test_hilbert2_types(self, dtype): in_typed = np.zeros((2, 32), dtype=dtype) assert_equal(np.real(signal.hilbert2(in_typed)).dtype, dtype) class TestPartialFractionExpansion: @staticmethod def assert_rp_almost_equal(r, p, r_true, p_true, decimal=7): r_true = np.asarray(r_true) p_true = np.asarray(p_true) distance = np.hypot(abs(p[:, None] - p_true), abs(r[:, None] - r_true)) rows, cols = linear_sum_assignment(distance) assert_almost_equal(p[rows], p_true[cols], decimal=decimal) assert_almost_equal(r[rows], r_true[cols], decimal=decimal) def test_compute_factors(self): factors, poly = _compute_factors([1, 2, 3], [3, 2, 1]) assert_equal(len(factors), 3) assert_almost_equal(factors[0], np.poly([2, 2, 3])) assert_almost_equal(factors[1], np.poly([1, 1, 1, 3])) assert_almost_equal(factors[2], np.poly([1, 1, 1, 2, 2])) assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) factors, poly = _compute_factors([1, 2, 3], [3, 2, 1], include_powers=True) assert_equal(len(factors), 6) assert_almost_equal(factors[0], np.poly([1, 1, 2, 2, 3])) assert_almost_equal(factors[1], np.poly([1, 2, 2, 3])) assert_almost_equal(factors[2], np.poly([2, 2, 3])) assert_almost_equal(factors[3], np.poly([1, 1, 1, 2, 3])) assert_almost_equal(factors[4], np.poly([1, 1, 1, 3])) assert_almost_equal(factors[5], np.poly([1, 1, 1, 2, 2])) assert_almost_equal(poly, np.poly([1, 1, 1, 2, 2, 3])) def test_group_poles(self): unique, multiplicity = _group_poles( [1.0, 1.001, 1.003, 2.0, 2.003, 3.0], 0.1, 'min') assert_equal(unique, [1.0, 2.0, 3.0]) assert_equal(multiplicity, [3, 2, 1]) def test_residue_general(self): # Test are taken from issue #4464, note that poles in scipy are # 
in increasing by absolute value order, opposite to MATLAB. r, p, k = residue([5, 3, -2, 7], [-4, 0, 8, 3]) assert_almost_equal(r, [1.3320, -0.6653, -1.4167], decimal=4) assert_almost_equal(p, [-0.4093, -1.1644, 1.5737], decimal=4) assert_almost_equal(k, [-1.2500], decimal=4) r, p, k = residue([-4, 8], [1, 6, 8]) assert_almost_equal(r, [8, -12]) assert_almost_equal(p, [-2, -4]) assert_equal(k.size, 0) r, p, k = residue([4, 1], [1, -1, -2]) assert_almost_equal(r, [1, 3]) assert_almost_equal(p, [-1, 2]) assert_equal(k.size, 0) r, p, k = residue([4, 3], [2, -3.4, 1.98, -0.406]) self.assert_rp_almost_equal( r, p, [-18.125 - 13.125j, -18.125 + 13.125j, 36.25], [0.5 - 0.2j, 0.5 + 0.2j, 0.7]) assert_equal(k.size, 0) r, p, k = residue([2, 1], [1, 5, 8, 4]) self.assert_rp_almost_equal(r, p, [-1, 1, 3], [-1, -2, -2]) assert_equal(k.size, 0) r, p, k = residue([3, -1.1, 0.88, -2.396, 1.348], [1, -0.7, -0.14, 0.048]) assert_almost_equal(r, [-3, 4, 1]) assert_almost_equal(p, [0.2, -0.3, 0.8]) assert_almost_equal(k, [3, 1]) r, p, k = residue([1], [1, 2, -3]) assert_almost_equal(r, [0.25, -0.25]) assert_almost_equal(p, [1, -3]) assert_equal(k.size, 0) r, p, k = residue([1, 0, -5], [1, 0, 0, 0, -1]) self.assert_rp_almost_equal(r, p, [1, 1.5j, -1.5j, -1], [-1, -1j, 1j, 1]) assert_equal(k.size, 0) r, p, k = residue([3, 8, 6], [1, 3, 3, 1]) self.assert_rp_almost_equal(r, p, [1, 2, 3], [-1, -1, -1]) assert_equal(k.size, 0) r, p, k = residue([3, -1], [1, -3, 2]) assert_almost_equal(r, [-2, 5]) assert_almost_equal(p, [1, 2]) assert_equal(k.size, 0) r, p, k = residue([2, 3, -1], [1, -3, 2]) assert_almost_equal(r, [-4, 13]) assert_almost_equal(p, [1, 2]) assert_almost_equal(k, [2]) r, p, k = residue([7, 2, 3, -1], [1, -3, 2]) assert_almost_equal(r, [-11, 69]) assert_almost_equal(p, [1, 2]) assert_almost_equal(k, [7, 23]) r, p, k = residue([2, 3, -1], [1, -3, 4, -2]) self.assert_rp_almost_equal(r, p, [4, -1 + 3.5j, -1 - 3.5j], [1, 1 - 1j, 1 + 1j]) assert_almost_equal(k.size, 0) def test_residue_leading_zeros(self): # Leading zeros in numerator or denominator must not affect the answer. r0, p0, k0 = residue([5, 3, -2, 7], [-4, 0, 8, 3]) r1, p1, k1 = residue([0, 5, 3, -2, 7], [-4, 0, 8, 3]) r2, p2, k2 = residue([5, 3, -2, 7], [0, -4, 0, 8, 3]) r3, p3, k3 = residue([0, 0, 5, 3, -2, 7], [0, 0, 0, -4, 0, 8, 3]) assert_almost_equal(r0, r1) assert_almost_equal(r0, r2) assert_almost_equal(r0, r3) assert_almost_equal(p0, p1) assert_almost_equal(p0, p2) assert_almost_equal(p0, p3) assert_almost_equal(k0, k1) assert_almost_equal(k0, k2) assert_almost_equal(k0, k3) def test_resiude_degenerate(self): # Several tests for zero numerator and denominator. 
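# --- Illustrative aside (not part of the original test suite) ---
# The partial-fraction round trip underlying the residue checks above:
# residue() expands B(s)/A(s) and invres() rebuilds it, so both forms agree
# when evaluated anywhere away from the poles.  Coefficients reuse a case
# from test_residue_general above.
import numpy as np
from scipy.signal import residue, invres

def _demo_residue_roundtrip():
    b, a = [4, 1], [1, -1, -2]          # H(s) = (4s + 1) / (s**2 - s - 2)
    r, p, k = residue(b, a)             # residues, poles, direct polynomial
    b2, a2 = invres(r, p, k)
    s = 3.0                             # any point that is not a pole
    assert np.isclose(np.polyval(b2, s) / np.polyval(a2, s),
                      np.polyval(b, s) / np.polyval(a, s))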
r, p, k = residue([0, 0], [1, 6, 8]) assert_almost_equal(r, [0, 0]) assert_almost_equal(p, [-2, -4]) assert_equal(k.size, 0) r, p, k = residue(0, 1) assert_equal(r.size, 0) assert_equal(p.size, 0) assert_equal(k.size, 0) with pytest.raises(ValueError, match="Denominator `a` is zero."): residue(1, 0) def test_residuez_general(self): r, p, k = residuez([1, 6, 6, 2], [1, -(2 + 1j), (1 + 2j), -1j]) self.assert_rp_almost_equal(r, p, [-2+2.5j, 7.5+7.5j, -4.5-12j], [1j, 1, 1]) assert_almost_equal(k, [2j]) r, p, k = residuez([1, 2, 1], [1, -1, 0.3561]) self.assert_rp_almost_equal(r, p, [-0.9041 - 5.9928j, -0.9041 + 5.9928j], [0.5 + 0.3257j, 0.5 - 0.3257j], decimal=4) assert_almost_equal(k, [2.8082], decimal=4) r, p, k = residuez([1, -1], [1, -5, 6]) assert_almost_equal(r, [-1, 2]) assert_almost_equal(p, [2, 3]) assert_equal(k.size, 0) r, p, k = residuez([2, 3, 4], [1, 3, 3, 1]) self.assert_rp_almost_equal(r, p, [4, -5, 3], [-1, -1, -1]) assert_equal(k.size, 0) r, p, k = residuez([1, -10, -4, 4], [2, -2, -4]) assert_almost_equal(r, [0.5, -1.5]) assert_almost_equal(p, [-1, 2]) assert_almost_equal(k, [1.5, -1]) r, p, k = residuez([18], [18, 3, -4, -1]) self.assert_rp_almost_equal(r, p, [0.36, 0.24, 0.4], [0.5, -1/3, -1/3]) assert_equal(k.size, 0) r, p, k = residuez([2, 3], np.polymul([1, -1/2], [1, 1/4])) assert_almost_equal(r, [-10/3, 16/3]) assert_almost_equal(p, [-0.25, 0.5]) assert_equal(k.size, 0) r, p, k = residuez([1, -2, 1], [1, -1]) assert_almost_equal(r, [0]) assert_almost_equal(p, [1]) assert_almost_equal(k, [1, -1]) r, p, k = residuez(1, [1, -1j]) assert_almost_equal(r, [1]) assert_almost_equal(p, [1j]) assert_equal(k.size, 0) r, p, k = residuez(1, [1, -1, 0.25]) assert_almost_equal(r, [0, 1]) assert_almost_equal(p, [0.5, 0.5]) assert_equal(k.size, 0) r, p, k = residuez(1, [1, -0.75, .125]) assert_almost_equal(r, [-1, 2]) assert_almost_equal(p, [0.25, 0.5]) assert_equal(k.size, 0) r, p, k = residuez([1, 6, 2], [1, -2, 1]) assert_almost_equal(r, [-10, 9]) assert_almost_equal(p, [1, 1]) assert_almost_equal(k, [2]) r, p, k = residuez([6, 2], [1, -2, 1]) assert_almost_equal(r, [-2, 8]) assert_almost_equal(p, [1, 1]) assert_equal(k.size, 0) r, p, k = residuez([1, 6, 6, 2], [1, -2, 1]) assert_almost_equal(r, [-24, 15]) assert_almost_equal(p, [1, 1]) assert_almost_equal(k, [10, 2]) r, p, k = residuez([1, 0, 1], [1, 0, 0, 0, 0, -1]) self.assert_rp_almost_equal(r, p, [0.2618 + 0.1902j, 0.2618 - 0.1902j, 0.4, 0.0382 - 0.1176j, 0.0382 + 0.1176j], [-0.8090 + 0.5878j, -0.8090 - 0.5878j, 1.0, 0.3090 + 0.9511j, 0.3090 - 0.9511j], decimal=4) assert_equal(k.size, 0) def test_residuez_trailing_zeros(self): # Trailing zeros in numerator or denominator must not affect the # answer. 
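# --- Illustrative aside (not part of the original test suite) ---
# The z-domain analogue used by the residuez checks above: the expansion is
# in powers of z**-1, and invresz() reverses it.  The r, p values noted in
# the comment below match the test_residuez_general case above.
import numpy as np
from scipy.signal import residuez, invresz

def _demo_residuez_roundtrip():
    b, a = [1, -1], [1, -5, 6]
    r, p, k = residuez(b, a)            # per the test: r = [-1, 2], p = [2, 3]
    b2, a2 = invresz(r, p, k)
    assert np.allclose(b2, b) and np.allclose(a2, a)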
r0, p0, k0 = residuez([5, 3, -2, 7], [-4, 0, 8, 3]) r1, p1, k1 = residuez([5, 3, -2, 7, 0], [-4, 0, 8, 3]) r2, p2, k2 = residuez([5, 3, -2, 7], [-4, 0, 8, 3, 0]) r3, p3, k3 = residuez([5, 3, -2, 7, 0, 0], [-4, 0, 8, 3, 0, 0, 0]) assert_almost_equal(r0, r1) assert_almost_equal(r0, r2) assert_almost_equal(r0, r3) assert_almost_equal(p0, p1) assert_almost_equal(p0, p2) assert_almost_equal(p0, p3) assert_almost_equal(k0, k1) assert_almost_equal(k0, k2) assert_almost_equal(k0, k3) def test_residuez_degenerate(self): r, p, k = residuez([0, 0], [1, 6, 8]) assert_almost_equal(r, [0, 0]) assert_almost_equal(p, [-2, -4]) assert_equal(k.size, 0) r, p, k = residuez(0, 1) assert_equal(r.size, 0) assert_equal(p.size, 0) assert_equal(k.size, 0) with pytest.raises(ValueError, match="Denominator `a` is zero."): residuez(1, 0) with pytest.raises(ValueError, match="First coefficient of determinant `a` must " "be non-zero."): residuez(1, [0, 1, 2, 3]) def test_inverse_unique_roots_different_rtypes(self): # This test was inspired by github issue 2496. r = [3 / 10, -1 / 6, -2 / 15] p = [0, -2, -5] k = [] b_expected = [0, 1, 3] a_expected = [1, 7, 10, 0] # With the default tolerance, the rtype does not matter # for this example. for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): b, a = invres(r, p, k, rtype=rtype) assert_allclose(b, b_expected) assert_allclose(a, a_expected) b, a = invresz(r, p, k, rtype=rtype) assert_allclose(b, b_expected) assert_allclose(a, a_expected) def test_inverse_repeated_roots_different_rtypes(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] b_expected = [0, 0, 1, 3] b_expected_z = [-1/6, -2/3, 11/6, 3] a_expected = [1, 9, 24, 20, 0] for rtype in ('avg', 'mean', 'min', 'minimum', 'max', 'maximum'): b, a = invres(r, p, k, rtype=rtype) assert_allclose(b, b_expected, atol=1e-14) assert_allclose(a, a_expected) b, a = invresz(r, p, k, rtype=rtype) assert_allclose(b, b_expected_z, atol=1e-14) assert_allclose(a, a_expected) def test_inverse_bad_rtype(self): r = [3 / 20, -7 / 36, -1 / 6, 2 / 45] p = [0, -2, -2, -5] k = [] with pytest.raises(ValueError, match="`rtype` must be one of"): invres(r, p, k, rtype='median') with pytest.raises(ValueError, match="`rtype` must be one of"): invresz(r, p, k, rtype='median') def test_invresz_one_coefficient_bug(self): # Regression test for issue in gh-4646. 
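# --- Illustrative aside (not part of the original test suite) ---
# How the `rtype` option exercised by the invres/invresz tests above picks a
# representative value for a cluster of nearly equal poles before the
# rational function is rebuilt.
from scipy.signal import unique_roots

def _demo_rtype_grouping():
    p = [1.0, 1.001, 2.0]
    roots_min, mult = unique_roots(p, tol=1e-2, rtype='min')  # [1.0, 2.0], [2, 1]
    roots_avg, _ = unique_roots(p, tol=1e-2, rtype='avg')     # [1.0005, 2.0]
    return roots_min, mult, roots_avg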
r = [1] p = [2] k = [0] b, a = invresz(r, p, k) assert_allclose(b, [1.0]) assert_allclose(a, [1.0, -2.0]) def test_invres(self): b, a = invres([1], [1], []) assert_almost_equal(b, [1]) assert_almost_equal(a, [1, -1]) b, a = invres([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) b, a = invres([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) assert_almost_equal(b, [1, -1 - 1j, 1 - 2j, 0.5 - 3j, 10]) assert_almost_equal(a, [1, -3 - 1j, 4]) b, a = invres([-1, 2, 1j, 3 - 1j, 4, -2], [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) assert_almost_equal(b, [4 - 1j, -28 + 16j, 40 - 62j, 100 + 24j, -292 + 219j, 192 - 268j]) assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, 108 - 54j, -81 + 108j]) b, a = invres([-1, 1j], [1, 1], [1, 2]) assert_almost_equal(b, [1, 0, -4, 3 + 1j]) assert_almost_equal(a, [1, -2, 1]) def test_invresz(self): b, a = invresz([1], [1], []) assert_almost_equal(b, [1]) assert_almost_equal(a, [1, -1]) b, a = invresz([1 - 1j, 2, 0.5 - 3j], [1, 0.5j, 1 + 1j], []) assert_almost_equal(b, [3.5 - 4j, -8.5 + 0.25j, 3.5 + 3.25j]) assert_almost_equal(a, [1, -2 - 1.5j, 0.5 + 2j, 0.5 - 0.5j]) b, a = invresz([0.5, 1], [1 - 1j, 2 + 2j], [1, 2, 3]) assert_almost_equal(b, [2.5, -3 - 1j, 1 - 2j, -1 - 3j, 12]) assert_almost_equal(a, [1, -3 - 1j, 4]) b, a = invresz([-1, 2, 1j, 3 - 1j, 4, -2], [-1, 2 - 1j, 2 - 1j, 3, 3, 3], []) assert_almost_equal(b, [6, -50 + 11j, 100 - 72j, 80 + 58j, -354 + 228j, 234 - 297j]) assert_almost_equal(a, [1, -12 + 2j, 53 - 20j, -96 + 68j, 27 - 72j, 108 - 54j, -81 + 108j]) b, a = invresz([-1, 1j], [1, 1], [1, 2]) assert_almost_equal(b, [1j, 1, -3, 2]) assert_almost_equal(a, [1, -2, 1]) def test_inverse_scalar_arguments(self): b, a = invres(1, 1, 1) assert_almost_equal(b, [1, 0]) assert_almost_equal(a, [1, -1]) b, a = invresz(1, 1, 1) assert_almost_equal(b, [2, -1]) assert_almost_equal(a, [1, -1]) class TestVectorstrength: def test_single_1dperiod(self): events = np.array([.5]) period = 5. targ_strength = 1. targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_single_2dperiod(self): events = np.array([.5]) period = [1, 2, 5.] targ_strength = [1.] * 3 targ_phase = np.array([.5, .25, .1]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_array_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_1dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = 2 targ_strength = 1. targ_phase = .125 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_equal_2dperiod(self): events = np.array([.25, .25, .25, .25, .25, .25]) period = [1, 2, ] targ_strength = [1.] * 2 targ_phase = np.array([.25, .125]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_1dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = 1 targ_strength = 1. 
targ_phase = .1 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_spaced_2dperiod(self): events = np.array([.1, 1.1, 2.1, 4.1, 10.1]) period = [1, .5] targ_strength = [1.] * 2 targ_phase = np.array([.1, .2]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_1dperiod(self): events = np.array([.25, .5, .75]) period = 1 targ_strength = 1. / 3. targ_phase = .5 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_partial_2dperiod(self): events = np.array([.25, .5, .75]) period = [1., 1., 1., 1.] targ_strength = [1. / 3.] * 4 targ_phase = np.array([.5, .5, .5, .5]) strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) assert_almost_equal(phase, 2 * np.pi * targ_phase) def test_opposite_1dperiod(self): events = np.array([0, .25, .5, .75]) period = 1. targ_strength = 0 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 0) assert_equal(phase.ndim, 0) assert_almost_equal(strength, targ_strength) def test_opposite_2dperiod(self): events = np.array([0, .25, .5, .75]) period = [1.] * 10 targ_strength = [0.] * 10 strength, phase = vectorstrength(events, period) assert_equal(strength.ndim, 1) assert_equal(phase.ndim, 1) assert_almost_equal(strength, targ_strength) def test_2d_events_ValueError(self): events = np.array([[1, 2]]) period = 1. assert_raises(ValueError, vectorstrength, events, period) def test_2d_period_ValueError(self): events = 1. period = np.array([[1]]) assert_raises(ValueError, vectorstrength, events, period) def test_zero_period_ValueError(self): events = 1. period = 0 assert_raises(ValueError, vectorstrength, events, period) def test_negative_period_ValueError(self): events = 1. period = -1 assert_raises(ValueError, vectorstrength, events, period) def cast_tf2sos(b, a): """Convert TF2SOS, casting to complex128 and back to the original dtype.""" # tf2sos does not support all of the dtypes that we want to check, e.g.: # # TypeError: array type complex256 is unsupported in linalg # # so let's cast, convert, and cast back -- should be fine for the # systems and precisions we are testing. 
dtype = np.asarray(b).dtype b = np.array(b, np.complex128) a = np.array(a, np.complex128) return tf2sos(b, a).astype(dtype) def assert_allclose_cast(actual, desired, rtol=1e-7, atol=0): """Wrap assert_allclose while casting object arrays.""" if actual.dtype.kind == 'O': dtype = np.array(actual.flat[0]).dtype actual, desired = actual.astype(dtype), desired.astype(dtype) assert_allclose(actual, desired, rtol, atol) @pytest.mark.parametrize('func', (sosfilt, lfilter)) def test_nonnumeric_dtypes(func): x = [Decimal(1), Decimal(2), Decimal(3)] b = [Decimal(1), Decimal(2), Decimal(3)] a = [Decimal(1), Decimal(2), Decimal(3)] x = np.array(x) assert x.dtype.kind == 'O' desired = lfilter(np.array(b, float), np.array(a, float), x.astype(float)) if func is sosfilt: actual = sosfilt([b + a], x) else: actual = lfilter(b, a, x) assert all(isinstance(x, Decimal) for x in actual) assert_allclose(actual.astype(float), desired.astype(float)) # Degenerate cases if func is lfilter: args = [1., 1.] else: args = [tf2sos(1., 1.)] with pytest.raises(ValueError, match='must be at least 1-D'): func(*args, x=1.) @pytest.mark.parametrize('dt', 'fdgFDGO') class TestSOSFilt: # The test_rank* tests are pulled from _TestLinearFilter def test_rank1(self, dt): x = np.linspace(0, 5, 6).astype(dt) b = np.array([1, -1]).astype(dt) a = np.array([0.5, -0.5]).astype(dt) # Test simple IIR y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(dt) sos = cast_tf2sos(b, a) assert sos.dtype.char == dt assert_array_almost_equal(sosfilt(cast_tf2sos(b, a), x), y_r) # Test simple FIR b = np.array([1, 1]).astype(dt) # NOTE: This was changed (rel. to TestLinear...) to add a pole @zero: a = np.array([1, 0]).astype(dt) y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(dt) assert_array_almost_equal(sosfilt(cast_tf2sos(b, a), x), y_r) b = [1, 1, 0] a = [1, 0, 0] x = np.ones(8) sos = np.concatenate((b, a)) sos.shape = (1, 6) y = sosfilt(sos, x) assert_allclose(y, [1, 2, 2, 2, 2, 2, 2, 2]) def test_rank2(self, dt): shape = (4, 3) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) x = x.astype(dt) b = np.array([1, -1]).astype(dt) a = np.array([0.5, 0.5]).astype(dt) y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]], dtype=dt) y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12], [18, -16, 18]], dtype=dt) y = sosfilt(cast_tf2sos(b, a), x, axis=0) assert_array_almost_equal(y_r2_a0, y) y = sosfilt(cast_tf2sos(b, a), x, axis=1) assert_array_almost_equal(y_r2_a1, y) def test_rank3(self, dt): shape = (4, 3, 2) x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape) b = np.array([1, -1]).astype(dt) a = np.array([0.5, 0.5]).astype(dt) # Test last axis y = sosfilt(cast_tf2sos(b, a), x) for i in range(x.shape[0]): for j in range(x.shape[1]): assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j])) def test_initial_conditions(self, dt): b1, a1 = signal.butter(2, 0.25, 'low') b2, a2 = signal.butter(2, 0.75, 'low') b3, a3 = signal.butter(2, 0.75, 'low') b = np.convolve(np.convolve(b1, b2), b3) a = np.convolve(np.convolve(a1, a2), a3) sos = np.array((np.r_[b1, a1], np.r_[b2, a2], np.r_[b3, a3])) x = np.random.rand(50).astype(dt) # Stopping filtering and continuing y_true, zi = lfilter(b, a, x[:20], zi=np.zeros(6)) y_true = np.r_[y_true, lfilter(b, a, x[20:], zi=zi)[0]] assert_allclose_cast(y_true, lfilter(b, a, x)) y_sos, zi = sosfilt(sos, x[:20], zi=np.zeros((3, 2))) y_sos = np.r_[y_sos, sosfilt(sos, x[20:], zi=zi)[0]] assert_allclose_cast(y_true, y_sos) # Use a step function zi = sosfilt_zi(sos) x = np.ones(8, dt) y, zf = 
sosfilt(sos, x, zi=zi) assert_allclose_cast(y, np.ones(8)) assert_allclose_cast(zf, zi) # Initial condition shape matching x.shape = (1, 1) + x.shape # 3D assert_raises(ValueError, sosfilt, sos, x, zi=zi) zi_nd = zi.copy() zi_nd.shape = (zi.shape[0], 1, 1, zi.shape[-1]) assert_raises(ValueError, sosfilt, sos, x, zi=zi_nd[:, :, :, [0, 1, 1]]) y, zf = sosfilt(sos, x, zi=zi_nd) assert_allclose_cast(y[0, 0], np.ones(8)) assert_allclose_cast(zf[:, 0, 0, :], zi) def test_initial_conditions_3d_axis1(self, dt): # Test the use of zi when sosfilt is applied to axis 1 of a 3-d input. # Input array is x. x = np.random.RandomState(159).randint(0, 5, size=(2, 15, 3)) x = x.astype(dt) # Design a filter in ZPK format and convert to SOS zpk = signal.butter(6, 0.35, output='zpk') sos = zpk2sos(*zpk) nsections = sos.shape[0] # Filter along this axis. axis = 1 # Initial conditions, all zeros. shp = list(x.shape) shp[axis] = 2 shp = [nsections] + shp z0 = np.zeros(shp) # Apply the filter to x. yf, zf = sosfilt(sos, x, axis=axis, zi=z0) # Apply the filter to x in two stages. y1, z1 = sosfilt(sos, x[:, :5, :], axis=axis, zi=z0) y2, z2 = sosfilt(sos, x[:, 5:, :], axis=axis, zi=z1) # y should equal yf, and z2 should equal zf. y = np.concatenate((y1, y2), axis=axis) assert_allclose_cast(y, yf, rtol=1e-10, atol=1e-13) assert_allclose_cast(z2, zf, rtol=1e-10, atol=1e-13) # let's try the "step" initial condition zi = sosfilt_zi(sos) zi.shape = [nsections, 1, 2, 1] zi = zi * x[:, 0:1, :] y = sosfilt(sos, x, axis=axis, zi=zi)[0] # check it against the TF form b, a = zpk2tf(*zpk) zi = lfilter_zi(b, a) zi.shape = [1, zi.size, 1] zi = zi * x[:, 0:1, :] y_tf = lfilter(b, a, x, axis=axis, zi=zi)[0] assert_allclose_cast(y, y_tf, rtol=1e-10, atol=1e-13) def test_bad_zi_shape(self, dt): # The shape of zi is checked before using any values in the # arguments, so np.empty is fine for creating the arguments. x = np.empty((3, 15, 3), dt) sos = np.zeros((4, 6)) zi = np.empty((4, 3, 3, 2)) # Correct shape is (4, 3, 2, 3) with pytest.raises(ValueError, match='should be all ones'): sosfilt(sos, x, zi=zi, axis=1) sos[:, 3] = 1. 
with pytest.raises(ValueError, match='Invalid zi shape'): sosfilt(sos, x, zi=zi, axis=1) def test_sosfilt_zi(self, dt): sos = signal.butter(6, 0.2, output='sos') zi = sosfilt_zi(sos) y, zf = sosfilt(sos, np.ones(40, dt), zi=zi) assert_allclose_cast(zf, zi, rtol=1e-13) # Expected steady state value of the step response of this filter: ss = np.prod(sos[:, :3].sum(axis=-1) / sos[:, 3:].sum(axis=-1)) assert_allclose_cast(y, ss, rtol=1e-13) # zi as array-like _, zf = sosfilt(sos, np.ones(40, dt), zi=zi.tolist()) assert_allclose_cast(zf, zi, rtol=1e-13) class TestDeconvolve: def test_basic(self): # From docstring example original = [0, 1, 0, 0, 1, 1, 0, 0] impulse_response = [2, 1] recorded = [0, 2, 1, 0, 2, 3, 1, 0, 0] recovered, remainder = signal.deconvolve(recorded, impulse_response) assert_allclose(recovered, original) def test_n_dimensional_signal(self): recorded = [[0, 0], [0, 0]] impulse_response = [0, 0] with pytest.raises(ValueError, match="signal must be 1-D."): quotient, remainder = signal.deconvolve(recorded, impulse_response) def test_n_dimensional_divisor(self): recorded = [0, 0] impulse_response = [[0, 0], [0, 0]] with pytest.raises(ValueError, match="divisor must be 1-D."): quotient, remainder = signal.deconvolve(recorded, impulse_response) class TestDetrend: def test_basic(self): detrended = detrend(array([1, 2, 3])) detrended_exact = array([0, 0, 0]) assert_array_almost_equal(detrended, detrended_exact) def test_copy(self): x = array([1, 1.2, 1.5, 1.6, 2.4]) copy_array = detrend(x, overwrite_data=False) inplace = detrend(x, overwrite_data=True) assert_array_almost_equal(copy_array, inplace) @pytest.mark.parametrize('kind', ['linear', 'constant']) @pytest.mark.parametrize('axis', [0, 1, 2]) def test_axis(self, axis, kind): data = np.arange(5*6*7).reshape(5, 6, 7) detrended = detrend(data, type=kind, axis=axis) assert detrended.shape == data.shape def test_bp(self): data = [0, 1, 2] + [5, 0, -5, -10] detrended = detrend(data, type='linear', bp=3) assert_allclose(detrended, 0, atol=1e-14) # repeat with ndim > 1 and axis data = np.asarray(data)[None, :, None] detrended = detrend(data, type="linear", bp=3, axis=1) assert_allclose(detrended, 0, atol=1e-14) # breakpoint index > shape[axis]: raises with assert_raises(ValueError): detrend(data, type="linear", bp=3) @pytest.mark.parametrize('bp', [np.array([0, 2]), [0, 2]]) def test_detrend_array_bp(self, bp): # regression test for https://github.com/scipy/scipy/issues/18675 rng = np.random.RandomState(12345) x = rng.rand(10) # bp = np.array([0, 2]) res = detrend(x, bp=bp) res_scipy_191 = np.array([-4.44089210e-16, -2.22044605e-16, -1.11128506e-01, -1.69470553e-01, 1.14710683e-01, 6.35468419e-02, 3.53533144e-01, -3.67877935e-02, -2.00417675e-02, -1.94362049e-01]) assert_allclose(res, res_scipy_191, atol=1e-14) class TestUniqueRoots: def test_real_no_repeat(self): p = [-1.0, -0.5, 0.3, 1.2, 10.0] unique, multiplicity = unique_roots(p) assert_almost_equal(unique, p, decimal=15) assert_equal(multiplicity, np.ones(len(p))) def test_real_repeat(self): p = [-1.0, -0.95, -0.89, -0.8, 0.5, 1.0, 1.05] unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') assert_almost_equal(unique, [-1.0, -0.89, 0.5, 1.0], decimal=15) assert_equal(multiplicity, [2, 2, 1, 2]) unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') assert_almost_equal(unique, [-0.95, -0.8, 0.5, 1.05], decimal=15) assert_equal(multiplicity, [2, 2, 1, 2]) unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') assert_almost_equal(unique, [-0.975, -0.845, 0.5, 
1.025], decimal=15) assert_equal(multiplicity, [2, 2, 1, 2]) def test_complex_no_repeat(self): p = [-1.0, 1.0j, 0.5 + 0.5j, -1.0 - 1.0j, 3.0 + 2.0j] unique, multiplicity = unique_roots(p) assert_almost_equal(unique, p, decimal=15) assert_equal(multiplicity, np.ones(len(p))) def test_complex_repeat(self): p = [-1.0, -1.0 + 0.05j, -0.95 + 0.15j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j, 0.45 + 0.55j] unique, multiplicity = unique_roots(p, tol=1e-1, rtype='min') assert_almost_equal(unique, [-1.0, -0.95 + 0.15j, 0.0, 0.45 + 0.55j], decimal=15) assert_equal(multiplicity, [2, 2, 1, 2]) unique, multiplicity = unique_roots(p, tol=1e-1, rtype='max') assert_almost_equal(unique, [-1.0 + 0.05j, -0.90 + 0.15j, 0.0, 0.5 + 0.5j], decimal=15) assert_equal(multiplicity, [2, 2, 1, 2]) unique, multiplicity = unique_roots(p, tol=1e-1, rtype='avg') assert_almost_equal( unique, [-1.0 + 0.025j, -0.925 + 0.15j, 0.0, 0.475 + 0.525j], decimal=15) assert_equal(multiplicity, [2, 2, 1, 2]) def test_gh_4915(self): p = np.roots(np.convolve(np.ones(5), np.ones(5))) true_roots = [-(-1)**(1/5), (-1)**(4/5), -(-1)**(3/5), (-1)**(2/5)] unique, multiplicity = unique_roots(p) unique = np.sort(unique) assert_almost_equal(np.sort(unique), true_roots, decimal=7) assert_equal(multiplicity, [2, 2, 2, 2]) def test_complex_roots_extra(self): unique, multiplicity = unique_roots([1.0, 1.0j, 1.0]) assert_almost_equal(unique, [1.0, 1.0j], decimal=15) assert_equal(multiplicity, [2, 1]) unique, multiplicity = unique_roots([1, 1 + 2e-9, 1e-9 + 1j], tol=0.1) assert_almost_equal(unique, [1.0, 1e-9 + 1.0j], decimal=15) assert_equal(multiplicity, [2, 1]) def test_single_unique_root(self): p = np.random.rand(100) + 1j * np.random.rand(100) unique, multiplicity = unique_roots(p, 2) assert_almost_equal(unique, [np.min(p)], decimal=15) assert_equal(multiplicity, [100])
141,168
37.226103
98
py
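The TestSOSFilt cases in the file above check that filtering a signal in chunks, while carrying the per-section state `zi` forward, matches filtering it in a single call. A minimal sketch of that usage pattern (illustrative only, not part of the scipy test file above; the Butterworth design parameters and signal are arbitrary):

import numpy as np
from scipy import signal

sos = signal.butter(6, 0.2, output='sos')            # three second-order sections
x = np.random.default_rng(0).standard_normal(50)

y_full = signal.sosfilt(sos, x)                       # filter in one call

zi = np.zeros((sos.shape[0], 2))                      # one (2,) state per section
y1, zi = signal.sosfilt(sos, x[:20], zi=zi)           # first chunk, keep final state
y2, _ = signal.sosfilt(sos, x[20:], zi=zi)            # continue from the saved state
np.testing.assert_allclose(np.r_[y1, y2], y_full)     # chunked == one-shot filtering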
scipy
scipy-main/scipy/signal/tests/test_ltisys.py
from abc import abstractmethod import warnings import numpy as np from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose, assert_, suppress_warnings) from pytest import raises as assert_raises from pytest import warns from scipy.signal import (ss2tf, tf2ss, lsim2, impulse2, step2, lti, dlti, bode, freqresp, lsim, impulse, step, abcd_normalize, place_poles, TransferFunction, StateSpace, ZerosPolesGain) from scipy.signal._filter_design import BadCoefficients import scipy.linalg as linalg def _assert_poles_close(P1,P2, rtol=1e-8, atol=1e-8): """ Check each pole in P1 is close to a pole in P2 with a 1e-8 relative tolerance or 1e-8 absolute tolerance (useful for zero poles). These tolerances are very strict but the systems tested are known to accept these poles so we should not be far from what is requested. """ P2 = P2.copy() for p1 in P1: found = False for p2_idx in range(P2.shape[0]): if np.allclose([np.real(p1), np.imag(p1)], [np.real(P2[p2_idx]), np.imag(P2[p2_idx])], rtol, atol): found = True np.delete(P2, p2_idx) break if not found: raise ValueError("Can't find pole " + str(p1) + " in " + str(P2)) class TestPlacePoles: def _check(self, A, B, P, **kwargs): """ Perform the most common tests on the poles computed by place_poles and return the Bunch object for further specific tests """ fsf = place_poles(A, B, P, **kwargs) expected, _ = np.linalg.eig(A - np.dot(B, fsf.gain_matrix)) _assert_poles_close(expected, fsf.requested_poles) _assert_poles_close(expected, fsf.computed_poles) _assert_poles_close(P,fsf.requested_poles) return fsf def test_real(self): # Test real pole placement using KNV and YT0 algorithm and example 1 in # section 4 of the reference publication (see place_poles docstring) A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, 1.343, -2.104]).reshape(4, 4) B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146,0]).reshape(4, 2) P = np.array([-0.2, -0.5, -5.0566, -8.6659]) # Check that both KNV and YT compute correct K matrix self._check(A, B, P, method='KNV0') self._check(A, B, P, method='YT') # Try to reach the specific case in _YT_real where two singular # values are almost equal. This is to improve code coverage but I # have no way to be sure this code is really reached # on some architectures this can lead to a RuntimeWarning invalid # value in divide (see gh-7590), so suppress it for now with np.errstate(invalid='ignore'): self._check(A, B, (2,2,3,3)) def test_complex(self): # Test complex pole placement on a linearized car model, taken from L. # Jaulin, Automatique pour la robotique, Cours et Exercices, iSTE # editions p 184/185 A = np.array([[0, 7, 0, 0], [0, 0, 0, 7/3.], [0, 0, 0, 0], [0, 0, 0, 0]]) B = np.array([[0, 0], [0, 0], [1, 0], [0, 1]]) # Test complex poles on YT P = np.array([-3, -1, -2-1j, -2+1j]) # on macOS arm64 this can lead to a RuntimeWarning invalid # value in divide, so suppress it for now with np.errstate(divide='ignore', invalid='ignore'): self._check(A, B, P) # Try to reach the specific case in _YT_complex where two singular # values are almost equal. This is to improve code coverage but I # have no way to be sure this code is really reached P = [0-1e-6j,0+1e-6j,-10,10] with np.errstate(divide='ignore', invalid='ignore'): self._check(A, B, P, maxiter=1000) # Try to reach the specific case in _YT_complex where the rank two # update yields two null vectors. This test was found via Monte Carlo. 
A = np.array( [-2148,-2902, -2267, -598, -1722, -1829, -165, -283, -2546, -167, -754, -2285, -543, -1700, -584, -2978, -925, -1300, -1583, -984, -386, -2650, -764, -897, -517, -1598, 2, -1709, -291, -338, -153, -1804, -1106, -1168, -867, -2297] ).reshape(6,6) B = np.array( [-108, -374, -524, -1285, -1232, -161, -1204, -672, -637, -15, -483, -23, -931, -780, -1245, -1129, -1290, -1502, -952, -1374, -62, -964, -930, -939, -792, -756, -1437, -491, -1543, -686] ).reshape(6,5) P = [-25.-29.j, -25.+29.j, 31.-42.j, 31.+42.j, 33.-41.j, 33.+41.j] self._check(A, B, P) # Use a lot of poles to go through all cases for update_order # in _YT_loop big_A = np.ones((11,11))-np.eye(11) big_B = np.ones((11,10))-np.diag([1]*10,1)[:,1:] big_A[:6,:6] = A big_B[:6,:5] = B P = [-10,-20,-30,40,50,60,70,-20-5j,-20+5j,5+3j,5-3j] with np.errstate(divide='ignore', invalid='ignore'): self._check(big_A, big_B, P) #check with only complex poles and only real poles P = [-10,-20,-30,-40,-50,-60,-70,-80,-90,-100] self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) P = [-10+10j,-20+20j,-30+30j,-40+40j,-50+50j, -10-10j,-20-20j,-30-30j,-40-40j,-50-50j] self._check(big_A[:-1,:-1], big_B[:-1,:-1], P) # need a 5x5 array to ensure YT handles properly when there # is only one real pole and several complex A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0, 0,0,0,5,0,0,0,0,9]).reshape(5,5) B = np.array([0,0,0,0,1,0,0,1,2,3]).reshape(5,2) P = np.array([-2, -3+1j, -3-1j, -1+1j, -1-1j]) with np.errstate(divide='ignore', invalid='ignore'): place_poles(A, B, P) # same test with an odd number of real poles > 1 # this is another specific case of YT P = np.array([-2, -3, -4, -1+1j, -1-1j]) with np.errstate(divide='ignore', invalid='ignore'): self._check(A, B, P) def test_tricky_B(self): # check we handle as we should the 1 column B matrices and # n column B matrices (with n such as shape(A)=(n, n)) A = np.array([1.380, -0.2077, 6.715, -5.676, -0.5814, -4.290, 0, 0.6750, 1.067, 4.273, -6.654, 5.893, 0.0480, 4.273, 1.343, -2.104]).reshape(4, 4) B = np.array([0, 5.679, 1.136, 1.136, 0, 0, -3.146, 0, 1, 2, 3, 4, 5, 6, 7, 8]).reshape(4, 4) # KNV or YT are not called here, it's a specific case with only # one unique solution P = np.array([-0.2, -0.5, -5.0566, -8.6659]) fsf = self._check(A, B, P) # rtol and nb_iter should be set to np.nan as the identity can be # used as transfer matrix assert_equal(fsf.rtol, np.nan) assert_equal(fsf.nb_iter, np.nan) # check with complex poles too as they trigger a specific case in # the specific case :-) P = np.array((-2+1j,-2-1j,-3,-2)) fsf = self._check(A, B, P) assert_equal(fsf.rtol, np.nan) assert_equal(fsf.nb_iter, np.nan) #now test with a B matrix with only one column (no optimisation) B = B[:,0].reshape(4,1) P = np.array((-2+1j,-2-1j,-3,-2)) fsf = self._check(A, B, P) # we can't optimize anything, check they are set to 0 as expected assert_equal(fsf.rtol, 0) assert_equal(fsf.nb_iter, 0) def test_errors(self): # Test input mistakes from user A = np.array([0,7,0,0,0,0,0,7/3.,0,0,0,0,0,0,0,0]).reshape(4,4) B = np.array([0,0,0,0,1,0,0,1]).reshape(4,2) #should fail as the method keyword is invalid assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), method="foo") #should fail as poles are not 1D array assert_raises(ValueError, place_poles, A, B, np.array((-2.1,-2.2,-2.3,-2.4)).reshape(4,1)) #should fail as A is not a 2D array assert_raises(ValueError, place_poles, A[:,:,np.newaxis], B, (-2.1,-2.2,-2.3,-2.4)) #should fail as B is not a 2D array assert_raises(ValueError, place_poles, A, B[:,:,np.newaxis], 
(-2.1,-2.2,-2.3,-2.4)) #should fail as there are too many poles assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4,-3)) #should fail as there are not enough poles assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3)) #should fail as the rtol is greater than 1 assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), rtol=42) #should fail as maxiter is smaller than 1 assert_raises(ValueError, place_poles, A, B, (-2.1,-2.2,-2.3,-2.4), maxiter=-42) # should fail as ndim(B) is two assert_raises(ValueError, place_poles, A, B, (-2,-2,-2,-2)) #unctrollable system assert_raises(ValueError, place_poles, np.ones((4,4)), np.ones((4,2)), (1,2,3,4)) # Should not raise ValueError as the poles can be placed but should # raise a warning as the convergence is not reached with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") fsf = place_poles(A, B, (-1,-2,-3,-4), rtol=1e-16, maxiter=42) assert_(len(w) == 1) assert_(issubclass(w[-1].category, UserWarning)) assert_("Convergence was not reached after maxiter iterations" in str(w[-1].message)) assert_equal(fsf.nb_iter, 42) # should fail as a complex misses its conjugate assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2)) # should fail as A is not square assert_raises(ValueError, place_poles, A[:,:3], B, (-2,-3,-4,-5)) # should fail as B has not the same number of lines as A assert_raises(ValueError, place_poles, A, B[:3,:], (-2,-3,-4,-5)) # should fail as KNV0 does not support complex poles assert_raises(ValueError, place_poles, A, B, (-2+1j,-2-1j,-2+3j,-2-3j), method="KNV0") class TestSS2TF: def check_matrix_shapes(self, p, q, r): ss2tf(np.zeros((p, p)), np.zeros((p, q)), np.zeros((r, p)), np.zeros((r, q)), 0) def test_shapes(self): # Each tuple holds: # number of states, number of inputs, number of outputs for p, q, r in [(3, 3, 3), (1, 3, 3), (1, 1, 1)]: self.check_matrix_shapes(p, q, r) def test_basic(self): # Test a round trip through tf2ss and ss2tf. 
b = np.array([1.0, 3.0, 5.0]) a = np.array([1.0, 2.0, 3.0]) A, B, C, D = tf2ss(b, a) assert_allclose(A, [[-2, -3], [1, 0]], rtol=1e-13) assert_allclose(B, [[1], [0]], rtol=1e-13) assert_allclose(C, [[1, 2]], rtol=1e-13) assert_allclose(D, [[1]], rtol=1e-14) bb, aa = ss2tf(A, B, C, D) assert_allclose(bb[0], b, rtol=1e-13) assert_allclose(aa, a, rtol=1e-13) def test_zero_order_round_trip(self): # See gh-5760 tf = (2, 1) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[0]], rtol=1e-13) assert_allclose(B, [[0]], rtol=1e-13) assert_allclose(C, [[0]], rtol=1e-13) assert_allclose(D, [[2]], rtol=1e-13) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[2, 0]], rtol=1e-13) assert_allclose(den, [1, 0], rtol=1e-13) tf = ([[5], [2]], 1) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[0]], rtol=1e-13) assert_allclose(B, [[0]], rtol=1e-13) assert_allclose(C, [[0], [0]], rtol=1e-13) assert_allclose(D, [[5], [2]], rtol=1e-13) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[5, 0], [2, 0]], rtol=1e-13) assert_allclose(den, [1, 0], rtol=1e-13) def test_simo_round_trip(self): # See gh-5753 tf = ([[1, 2], [1, 1]], [1, 2]) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[-2]], rtol=1e-13) assert_allclose(B, [[1]], rtol=1e-13) assert_allclose(C, [[0], [-1]], rtol=1e-13) assert_allclose(D, [[1], [1]], rtol=1e-13) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[1, 2], [1, 1]], rtol=1e-13) assert_allclose(den, [1, 2], rtol=1e-13) tf = ([[1, 0, 1], [1, 1, 1]], [1, 1, 1]) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[-1, -1], [1, 0]], rtol=1e-13) assert_allclose(B, [[1], [0]], rtol=1e-13) assert_allclose(C, [[-1, 0], [0, 0]], rtol=1e-13) assert_allclose(D, [[1], [1]], rtol=1e-13) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[1, 0, 1], [1, 1, 1]], rtol=1e-13) assert_allclose(den, [1, 1, 1], rtol=1e-13) tf = ([[1, 2, 3], [1, 2, 3]], [1, 2, 3, 4]) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[-2, -3, -4], [1, 0, 0], [0, 1, 0]], rtol=1e-13) assert_allclose(B, [[1], [0], [0]], rtol=1e-13) assert_allclose(C, [[1, 2, 3], [1, 2, 3]], rtol=1e-13) assert_allclose(D, [[0], [0]], rtol=1e-13) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[0, 1, 2, 3], [0, 1, 2, 3]], rtol=1e-13) assert_allclose(den, [1, 2, 3, 4], rtol=1e-13) tf = (np.array([1, [2, 3]], dtype=object), [1, 6]) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[-6]], rtol=1e-31) assert_allclose(B, [[1]], rtol=1e-31) assert_allclose(C, [[1], [-9]], rtol=1e-31) assert_allclose(D, [[0], [2]], rtol=1e-31) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[0, 1], [2, 3]], rtol=1e-13) assert_allclose(den, [1, 6], rtol=1e-13) tf = (np.array([[1, -3], [1, 2, 3]], dtype=object), [1, 6, 5]) A, B, C, D = tf2ss(*tf) assert_allclose(A, [[-6, -5], [1, 0]], rtol=1e-13) assert_allclose(B, [[1], [0]], rtol=1e-13) assert_allclose(C, [[1, -3], [-4, -2]], rtol=1e-13) assert_allclose(D, [[0], [1]], rtol=1e-13) num, den = ss2tf(A, B, C, D) assert_allclose(num, [[0, 1, -3], [1, 2, 3]], rtol=1e-13) assert_allclose(den, [1, 6, 5], rtol=1e-13) def test_all_int_arrays(self): A = [[0, 1, 0], [0, 0, 1], [-3, -4, -2]] B = [[0], [0], [1]] C = [[5, 1, 0]] D = [[0]] num, den = ss2tf(A, B, C, D) assert_allclose(num, [[0.0, 0.0, 1.0, 5.0]], rtol=1e-13, atol=1e-14) assert_allclose(den, [1.0, 2.0, 4.0, 3.0], rtol=1e-13) def test_multioutput(self): # Regression test for gh-2669. 
# 4 states A = np.array([[-1.0, 0.0, 1.0, 0.0], [-1.0, 0.0, 2.0, 0.0], [-4.0, 0.0, 3.0, 0.0], [-8.0, 8.0, 0.0, 4.0]]) # 1 input B = np.array([[0.3], [0.0], [7.0], [0.0]]) # 3 outputs C = np.array([[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0], [8.0, 8.0, 0.0, 0.0]]) D = np.array([[0.0], [0.0], [1.0]]) # Get the transfer functions for all the outputs in one call. b_all, a = ss2tf(A, B, C, D) # Get the transfer functions for each output separately. b0, a0 = ss2tf(A, B, C[0], D[0]) b1, a1 = ss2tf(A, B, C[1], D[1]) b2, a2 = ss2tf(A, B, C[2], D[2]) # Check that we got the same results. assert_allclose(a0, a, rtol=1e-13) assert_allclose(a1, a, rtol=1e-13) assert_allclose(a2, a, rtol=1e-13) assert_allclose(b_all, np.vstack((b0, b1, b2)), rtol=1e-13, atol=1e-14) class _TestLsimFuncs: digits_accuracy = 7 @abstractmethod def func(self, *args, **kwargs): pass def lti_nowarn(self, *args): with suppress_warnings() as sup: sup.filter(BadCoefficients) system = lti(*args) return system def test_first_order(self): # y' = -y # exact solution is y(t) = exp(-t) system = self.lti_nowarn(-1.,1.,1.,0.) t = np.linspace(0,5) u = np.zeros_like(t) tout, y, x = self.func(system, u, t, X0=[1.0]) expected_x = np.exp(-tout) assert_almost_equal(x, expected_x) assert_almost_equal(y, expected_x) def test_second_order(self): t = np.linspace(0, 10, 1001) u = np.zeros_like(t) # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution # is (1-t)*exp(-t). system = self.lti_nowarn([1.0], [1.0, 2.0, 1.0]) tout, y, x = self.func(system, u, t, X0=[1.0, 0.0]) expected_x = (1.0 - tout) * np.exp(-tout) assert_almost_equal(x[:, 0], expected_x) def test_integrator(self): # integrator: y' = u system = self.lti_nowarn(0., 1., 1., 0.) t = np.linspace(0,5) u = t tout, y, x = self.func(system, u, t) expected_x = 0.5 * tout**2 assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) assert_almost_equal(y, expected_x, decimal=self.digits_accuracy) def test_two_states(self): # A system with two state variables, two inputs, and one output. A = np.array([[-1.0, 0.0], [0.0, -2.0]]) B = np.array([[1.0, 0.0], [0.0, 1.0]]) C = np.array([1.0, 0.0]) D = np.zeros((1, 2)) system = self.lti_nowarn(A, B, C, D) t = np.linspace(0, 10.0, 21) u = np.zeros((len(t), 2)) tout, y, x = self.func(system, U=u, T=t, X0=[1.0, 1.0]) expected_y = np.exp(-tout) expected_x0 = np.exp(-tout) expected_x1 = np.exp(-2.0 * tout) assert_almost_equal(y, expected_y) assert_almost_equal(x[:, 0], expected_x0) assert_almost_equal(x[:, 1], expected_x1) def test_double_integrator(self): # double integrator: y'' = 2u A = np.array([[0., 1.], [0., 0.]]) B = np.array([[0.], [1.]]) C = np.array([[2., 0.]]) system = self.lti_nowarn(A, B, C, 0.) t = np.linspace(0,5) u = np.ones_like(t) tout, y, x = self.func(system, u, t) expected_x = np.transpose(np.array([0.5 * tout**2, tout])) expected_y = tout**2 assert_almost_equal(x, expected_x, decimal=self.digits_accuracy) assert_almost_equal(y, expected_y, decimal=self.digits_accuracy) def test_jordan_block(self): # Non-diagonalizable A matrix # x1' + x1 = x2 # x2' + x2 = u # y = x1 # Exact solution with u = 0 is y(t) = t exp(-t) A = np.array([[-1., 1.], [0., -1.]]) B = np.array([[0.], [1.]]) C = np.array([[1., 0.]]) system = self.lti_nowarn(A, B, C, 0.) 
t = np.linspace(0,5) u = np.zeros_like(t) tout, y, x = self.func(system, u, t, X0=[0.0, 1.0]) expected_y = tout * np.exp(-tout) assert_almost_equal(y, expected_y) def test_miso(self): # A system with two state variables, two inputs, and one output. A = np.array([[-1.0, 0.0], [0.0, -2.0]]) B = np.array([[1.0, 0.0], [0.0, 1.0]]) C = np.array([1.0, 0.0]) D = np.zeros((1,2)) system = self.lti_nowarn(A, B, C, D) t = np.linspace(0, 5.0, 101) u = np.zeros((len(t), 2)) tout, y, x = self.func(system, u, t, X0=[1.0, 1.0]) expected_y = np.exp(-tout) expected_x0 = np.exp(-tout) expected_x1 = np.exp(-2.0*tout) assert_almost_equal(y, expected_y) assert_almost_equal(x[:,0], expected_x0) assert_almost_equal(x[:,1], expected_x1) class TestLsim(_TestLsimFuncs): def func(self, *args, **kwargs): return lsim(*args, **kwargs) def test_nonzero_initial_time(self): system = self.lti_nowarn(-1.,1.,1.,0.) t = np.linspace(1,2) u = np.zeros_like(t) tout, y, x = self.func(system, u, t, X0=[1.0]) expected_y = np.exp(-tout) assert_almost_equal(y, expected_y) def test_nonequal_timesteps(self): t = np.array([0.0, 1.0, 1.0, 3.0]) u = np.array([0.0, 0.0, 1.0, 1.0]) # Simple integrator: x'(t) = u(t) system = ([1.0], [1.0, 0.0]) with assert_raises(ValueError, match="Time steps are not equally spaced."): tout, y, x = self.func(system, u, t, X0=[1.0]) class TestLsim2(_TestLsimFuncs): digits_accuracy = 6 def func(self, *args, **kwargs): with warns(DeprecationWarning, match="lsim2 is deprecated"): t, y, x = lsim2(*args, **kwargs) return t, np.squeeze(y), np.squeeze(x) def test_integrator_nonequal_timestamp(self): t = np.array([0.0, 1.0, 1.0, 3.0]) u = np.array([0.0, 0.0, 1.0, 1.0]) # Simple integrator: x'(t) = u(t) system = ([1.0],[1.0,0.0]) tout, y, x = self.func(system, u, t, X0=[1.0]) expected_x = np.maximum(1.0, tout) assert_almost_equal(x, expected_x) def test_integrator_nonequal_timestamp_kwarg(self): t = np.array([0.0, 1.0, 1.0, 1.1, 1.1, 2.0]) u = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]) # Simple integrator: x'(t) = u(t) system = ([1.0],[1.0, 0.0]) tout, y, x = self.func(system, u, t, hmax=0.01) expected_x = np.array([0.0, 0.0, 0.0, 0.1, 0.1, 0.1]) assert_almost_equal(x, expected_x) def test_default_arguments(self): # Test use of the default values of the arguments `T` and `U`. # Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = 0. # With initial conditions x(0)=1.0 and x'(t)=0.0, the exact solution # is (1-t)*exp(-t). system = ([1.0], [1.0, 2.0, 1.0]) tout, y, x = self.func(system, X0=[1.0, 0.0]) expected_x = (1.0 - tout) * np.exp(-tout) assert_almost_equal(x[:,0], expected_x) class _TestImpulseFuncs: # Common tests for impulse/impulse2 (= self.func) def test_first_order(self): # First order system: x'(t) + x(t) = u(t) # Exact impulse response is x(t) = exp(-t). system = ([1.0], [1.0,1.0]) tout, y = self.func(system) expected_y = np.exp(-tout) assert_almost_equal(y, expected_y) def test_first_order_fixed_time(self): # Specify the desired time values for the output. # First order system: x'(t) + x(t) = u(t) # Exact impulse response is x(t) = exp(-t). system = ([1.0], [1.0,1.0]) n = 21 t = np.linspace(0, 2.0, n) tout, y = self.func(system, T=t) assert_equal(tout.shape, (n,)) assert_almost_equal(tout, t) expected_y = np.exp(-t) assert_almost_equal(y, expected_y) def test_first_order_initial(self): # Specify an initial condition as a scalar. # First order system: x'(t) + x(t) = u(t), x(0)=3.0 # Exact impulse response is x(t) = 4*exp(-t). 
system = ([1.0], [1.0,1.0]) tout, y = self.func(system, X0=3.0) expected_y = 4.0 * np.exp(-tout) assert_almost_equal(y, expected_y) def test_first_order_initial_list(self): # Specify an initial condition as a list. # First order system: x'(t) + x(t) = u(t), x(0)=3.0 # Exact impulse response is x(t) = 4*exp(-t). system = ([1.0], [1.0,1.0]) tout, y = self.func(system, X0=[3.0]) expected_y = 4.0 * np.exp(-tout) assert_almost_equal(y, expected_y) def test_integrator(self): # Simple integrator: x'(t) = u(t) system = ([1.0], [1.0,0.0]) tout, y = self.func(system) expected_y = np.ones_like(tout) assert_almost_equal(y, expected_y) def test_second_order(self): # Second order system with a repeated root: # x''(t) + 2*x(t) + x(t) = u(t) # The exact impulse response is t*exp(-t). system = ([1.0], [1.0, 2.0, 1.0]) tout, y = self.func(system) expected_y = tout * np.exp(-tout) assert_almost_equal(y, expected_y) def test_array_like(self): # Test that function can accept sequences, scalars. system = ([1.0], [1.0, 2.0, 1.0]) # TODO: add meaningful test where X0 is a list tout, y = self.func(system, X0=[3], T=[5, 6]) tout, y = self.func(system, X0=[3], T=[5]) def test_array_like2(self): system = ([1.0], [1.0, 2.0, 1.0]) tout, y = self.func(system, X0=3, T=5) class TestImpulse2(_TestImpulseFuncs): def func(self, *args, **kwargs): with warns(DeprecationWarning, match="impulse2 is deprecated"): return impulse2(*args, **kwargs) class TestImpulse(_TestImpulseFuncs): def func(self, *args, **kwargs): return impulse(*args, **kwargs) class _TestStepFuncs: def test_first_order(self): # First order system: x'(t) + x(t) = u(t) # Exact step response is x(t) = 1 - exp(-t). system = ([1.0], [1.0,1.0]) tout, y = self.func(system) expected_y = 1.0 - np.exp(-tout) assert_almost_equal(y, expected_y) def test_first_order_fixed_time(self): # Specify the desired time values for the output. # First order system: x'(t) + x(t) = u(t) # Exact step response is x(t) = 1 - exp(-t). system = ([1.0], [1.0,1.0]) n = 21 t = np.linspace(0, 2.0, n) tout, y = self.func(system, T=t) assert_equal(tout.shape, (n,)) assert_almost_equal(tout, t) expected_y = 1 - np.exp(-t) assert_almost_equal(y, expected_y) def test_first_order_initial(self): # Specify an initial condition as a scalar. # First order system: x'(t) + x(t) = u(t), x(0)=3.0 # Exact step response is x(t) = 1 + 2*exp(-t). system = ([1.0], [1.0,1.0]) tout, y = self.func(system, X0=3.0) expected_y = 1 + 2.0*np.exp(-tout) assert_almost_equal(y, expected_y) def test_first_order_initial_list(self): # Specify an initial condition as a list. # First order system: x'(t) + x(t) = u(t), x(0)=3.0 # Exact step response is x(t) = 1 + 2*exp(-t). system = ([1.0], [1.0,1.0]) tout, y = self.func(system, X0=[3.0]) expected_y = 1 + 2.0*np.exp(-tout) assert_almost_equal(y, expected_y) def test_integrator(self): # Simple integrator: x'(t) = u(t) # Exact step response is x(t) = t. system = ([1.0],[1.0,0.0]) tout, y = self.func(system) expected_y = tout assert_almost_equal(y, expected_y) def test_second_order(self): # Second order system with a repeated root: # x''(t) + 2*x(t) + x(t) = u(t) # The exact step response is 1 - (1 + t)*exp(-t). system = ([1.0], [1.0, 2.0, 1.0]) tout, y = self.func(system) expected_y = 1 - (1 + tout) * np.exp(-tout) assert_almost_equal(y, expected_y) def test_array_like(self): # Test that function can accept sequences, scalars. 
system = ([1.0], [1.0, 2.0, 1.0]) # TODO: add meaningful test where X0 is a list tout, y = self.func(system, T=[5, 6]) class TestStep2(_TestStepFuncs): def func(self, *args, **kwargs): with warns(DeprecationWarning, match="step2 is deprecated"): return step2(*args, **kwargs) def test_integrator(self): # This test is almost the same as the one it overwrites in the base # class. The only difference is the tolerances passed to step2: # the default tolerances are not accurate enough for this test # Simple integrator: x'(t) = u(t) # Exact step response is x(t) = t. system = ([1.0], [1.0,0.0]) tout, y = self.func(system, atol=1e-10, rtol=1e-8) expected_y = tout assert_almost_equal(y, expected_y) class TestStep(_TestStepFuncs): def func(self, *args, **kwargs): return step(*args, **kwargs) def test_complex_input(self): # Test that complex input doesn't raise an error. # `step` doesn't seem to have been designed for complex input, but this # works and may be used, so add regression test. See gh-2654. step(([], [-1], 1+0j)) class TestLti: def test_lti_instantiation(self): # Test that lti can be instantiated with sequences, scalars. # See PR-225. # TransferFunction s = lti([1], [-1]) assert_(isinstance(s, TransferFunction)) assert_(isinstance(s, lti)) assert_(not isinstance(s, dlti)) assert_(s.dt is None) # ZerosPolesGain s = lti(np.array([]), np.array([-1]), 1) assert_(isinstance(s, ZerosPolesGain)) assert_(isinstance(s, lti)) assert_(not isinstance(s, dlti)) assert_(s.dt is None) # StateSpace s = lti([], [-1], 1) s = lti([1], [-1], 1, 3) assert_(isinstance(s, StateSpace)) assert_(isinstance(s, lti)) assert_(not isinstance(s, dlti)) assert_(s.dt is None) class TestStateSpace: def test_initialization(self): # Check that all initializations work StateSpace(1, 1, 1, 1) StateSpace([1], [2], [3], [4]) StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]), np.array([[1, 0]]), np.array([[0]])) def test_conversion(self): # Check the conversion functions s = StateSpace(1, 2, 3, 4) assert_(isinstance(s.to_ss(), StateSpace)) assert_(isinstance(s.to_tf(), TransferFunction)) assert_(isinstance(s.to_zpk(), ZerosPolesGain)) # Make sure copies work assert_(StateSpace(s) is not s) assert_(s.to_ss() is not s) def test_properties(self): # Test setters/getters for cross class properties. 
# This implicitly tests to_tf() and to_zpk() # Getters s = StateSpace(1, 1, 1, 1) assert_equal(s.poles, [1]) assert_equal(s.zeros, [0]) assert_(s.dt is None) def test_operators(self): # Test +/-/* operators on systems class BadType: pass s1 = StateSpace(np.array([[-0.5, 0.7], [0.3, -0.8]]), np.array([[1], [0]]), np.array([[1, 0]]), np.array([[0]]), ) s2 = StateSpace(np.array([[-0.2, -0.1], [0.4, -0.1]]), np.array([[1], [0]]), np.array([[1, 0]]), np.array([[0]]) ) s_discrete = s1.to_discrete(0.1) s2_discrete = s2.to_discrete(0.2) s3_discrete = s2.to_discrete(0.1) # Impulse response t = np.linspace(0, 1, 100) u = np.zeros_like(t) u[0] = 1 # Test multiplication for typ in (int, float, complex, np.float32, np.complex128, np.array): assert_allclose(lsim(typ(2) * s1, U=u, T=t)[1], typ(2) * lsim(s1, U=u, T=t)[1]) assert_allclose(lsim(s1 * typ(2), U=u, T=t)[1], lsim(s1, U=u, T=t)[1] * typ(2)) assert_allclose(lsim(s1 / typ(2), U=u, T=t)[1], lsim(s1, U=u, T=t)[1] / typ(2)) with assert_raises(TypeError): typ(2) / s1 assert_allclose(lsim(s1 * 2, U=u, T=t)[1], lsim(s1, U=2 * u, T=t)[1]) assert_allclose(lsim(s1 * s2, U=u, T=t)[1], lsim(s1, U=lsim(s2, U=u, T=t)[1], T=t)[1], atol=1e-5) with assert_raises(TypeError): s1 / s1 with assert_raises(TypeError): s1 * s_discrete with assert_raises(TypeError): # Check different discretization constants s_discrete * s2_discrete with assert_raises(TypeError): s1 * BadType() with assert_raises(TypeError): BadType() * s1 with assert_raises(TypeError): s1 / BadType() with assert_raises(TypeError): BadType() / s1 # Test addition assert_allclose(lsim(s1 + 2, U=u, T=t)[1], 2 * u + lsim(s1, U=u, T=t)[1]) # Check for dimension mismatch with assert_raises(ValueError): s1 + np.array([1, 2]) with assert_raises(ValueError): np.array([1, 2]) + s1 with assert_raises(TypeError): s1 + s_discrete with assert_raises(ValueError): s1 / np.array([[1, 2], [3, 4]]) with assert_raises(TypeError): # Check different discretization constants s_discrete + s2_discrete with assert_raises(TypeError): s1 + BadType() with assert_raises(TypeError): BadType() + s1 assert_allclose(lsim(s1 + s2, U=u, T=t)[1], lsim(s1, U=u, T=t)[1] + lsim(s2, U=u, T=t)[1]) # Test subtraction assert_allclose(lsim(s1 - 2, U=u, T=t)[1], -2 * u + lsim(s1, U=u, T=t)[1]) assert_allclose(lsim(2 - s1, U=u, T=t)[1], 2 * u + lsim(-s1, U=u, T=t)[1]) assert_allclose(lsim(s1 - s2, U=u, T=t)[1], lsim(s1, U=u, T=t)[1] - lsim(s2, U=u, T=t)[1]) with assert_raises(TypeError): s1 - BadType() with assert_raises(TypeError): BadType() - s1 s = s_discrete + s3_discrete assert_(s.dt == 0.1) s = s_discrete * s3_discrete assert_(s.dt == 0.1) s = 3 * s_discrete assert_(s.dt == 0.1) s = -s_discrete assert_(s.dt == 0.1) class TestTransferFunction: def test_initialization(self): # Check that all initializations work TransferFunction(1, 1) TransferFunction([1], [2]) TransferFunction(np.array([1]), np.array([2])) def test_conversion(self): # Check the conversion functions s = TransferFunction([1, 0], [1, -1]) assert_(isinstance(s.to_ss(), StateSpace)) assert_(isinstance(s.to_tf(), TransferFunction)) assert_(isinstance(s.to_zpk(), ZerosPolesGain)) # Make sure copies work assert_(TransferFunction(s) is not s) assert_(s.to_tf() is not s) def test_properties(self): # Test setters/getters for cross class properties. 
# This implicitly tests to_ss() and to_zpk() # Getters s = TransferFunction([1, 0], [1, -1]) assert_equal(s.poles, [1]) assert_equal(s.zeros, [0]) class TestZerosPolesGain: def test_initialization(self): # Check that all initializations work ZerosPolesGain(1, 1, 1) ZerosPolesGain([1], [2], 1) ZerosPolesGain(np.array([1]), np.array([2]), 1) def test_conversion(self): #Check the conversion functions s = ZerosPolesGain(1, 2, 3) assert_(isinstance(s.to_ss(), StateSpace)) assert_(isinstance(s.to_tf(), TransferFunction)) assert_(isinstance(s.to_zpk(), ZerosPolesGain)) # Make sure copies work assert_(ZerosPolesGain(s) is not s) assert_(s.to_zpk() is not s) class Test_abcd_normalize: def setup_method(self): self.A = np.array([[1.0, 2.0], [3.0, 4.0]]) self.B = np.array([[-1.0], [5.0]]) self.C = np.array([[4.0, 5.0]]) self.D = np.array([[2.5]]) def test_no_matrix_fails(self): assert_raises(ValueError, abcd_normalize) def test_A_nosquare_fails(self): assert_raises(ValueError, abcd_normalize, [1, -1], self.B, self.C, self.D) def test_AB_mismatch_fails(self): assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], self.C, self.D) def test_AC_mismatch_fails(self): assert_raises(ValueError, abcd_normalize, self.A, self.B, [[4.0], [5.0]], self.D) def test_CD_mismatch_fails(self): assert_raises(ValueError, abcd_normalize, self.A, self.B, self.C, [2.5, 0]) def test_BD_mismatch_fails(self): assert_raises(ValueError, abcd_normalize, self.A, [-1, 5], self.C, self.D) def test_normalized_matrices_unchanged(self): A, B, C, D = abcd_normalize(self.A, self.B, self.C, self.D) assert_equal(A, self.A) assert_equal(B, self.B) assert_equal(C, self.C) assert_equal(D, self.D) def test_shapes(self): A, B, C, D = abcd_normalize(self.A, self.B, [1, 0], 0) assert_equal(A.shape[0], A.shape[1]) assert_equal(A.shape[0], B.shape[0]) assert_equal(A.shape[0], C.shape[1]) assert_equal(C.shape[0], D.shape[0]) assert_equal(B.shape[1], D.shape[1]) def test_zero_dimension_is_not_none1(self): B_ = np.zeros((2, 0)) D_ = np.zeros((0, 0)) A, B, C, D = abcd_normalize(A=self.A, B=B_, D=D_) assert_equal(A, self.A) assert_equal(B, B_) assert_equal(D, D_) assert_equal(C.shape[0], D_.shape[0]) assert_equal(C.shape[1], self.A.shape[0]) def test_zero_dimension_is_not_none2(self): B_ = np.zeros((2, 0)) C_ = np.zeros((0, 2)) A, B, C, D = abcd_normalize(A=self.A, B=B_, C=C_) assert_equal(A, self.A) assert_equal(B, B_) assert_equal(C, C_) assert_equal(D.shape[0], C_.shape[0]) assert_equal(D.shape[1], B_.shape[1]) def test_missing_A(self): A, B, C, D = abcd_normalize(B=self.B, C=self.C, D=self.D) assert_equal(A.shape[0], A.shape[1]) assert_equal(A.shape[0], B.shape[0]) assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) def test_missing_B(self): A, B, C, D = abcd_normalize(A=self.A, C=self.C, D=self.D) assert_equal(B.shape[0], A.shape[0]) assert_equal(B.shape[1], D.shape[1]) assert_equal(B.shape, (self.A.shape[0], self.D.shape[1])) def test_missing_C(self): A, B, C, D = abcd_normalize(A=self.A, B=self.B, D=self.D) assert_equal(C.shape[0], D.shape[0]) assert_equal(C.shape[1], A.shape[0]) assert_equal(C.shape, (self.D.shape[0], self.A.shape[0])) def test_missing_D(self): A, B, C, D = abcd_normalize(A=self.A, B=self.B, C=self.C) assert_equal(D.shape[0], C.shape[0]) assert_equal(D.shape[1], B.shape[1]) assert_equal(D.shape, (self.C.shape[0], self.B.shape[1])) def test_missing_AB(self): A, B, C, D = abcd_normalize(C=self.C, D=self.D) assert_equal(A.shape[0], A.shape[1]) assert_equal(A.shape[0], B.shape[0]) assert_equal(B.shape[1], D.shape[1]) 
assert_equal(A.shape, (self.C.shape[1], self.C.shape[1])) assert_equal(B.shape, (self.C.shape[1], self.D.shape[1])) def test_missing_AC(self): A, B, C, D = abcd_normalize(B=self.B, D=self.D) assert_equal(A.shape[0], A.shape[1]) assert_equal(A.shape[0], B.shape[0]) assert_equal(C.shape[0], D.shape[0]) assert_equal(C.shape[1], A.shape[0]) assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) assert_equal(C.shape, (self.D.shape[0], self.B.shape[0])) def test_missing_AD(self): A, B, C, D = abcd_normalize(B=self.B, C=self.C) assert_equal(A.shape[0], A.shape[1]) assert_equal(A.shape[0], B.shape[0]) assert_equal(D.shape[0], C.shape[0]) assert_equal(D.shape[1], B.shape[1]) assert_equal(A.shape, (self.B.shape[0], self.B.shape[0])) assert_equal(D.shape, (self.C.shape[0], self.B.shape[1])) def test_missing_BC(self): A, B, C, D = abcd_normalize(A=self.A, D=self.D) assert_equal(B.shape[0], A.shape[0]) assert_equal(B.shape[1], D.shape[1]) assert_equal(C.shape[0], D.shape[0]) assert_equal(C.shape[1], A.shape[0]) assert_equal(B.shape, (self.A.shape[0], self.D.shape[1])) assert_equal(C.shape, (self.D.shape[0], self.A.shape[0])) def test_missing_ABC_fails(self): assert_raises(ValueError, abcd_normalize, D=self.D) def test_missing_BD_fails(self): assert_raises(ValueError, abcd_normalize, A=self.A, C=self.C) def test_missing_CD_fails(self): assert_raises(ValueError, abcd_normalize, A=self.A, B=self.B) class Test_bode: def test_01(self): # Test bode() magnitude calculation (manual sanity check). # 1st order low-pass filter: H(s) = 1 / (s + 1), # cutoff: 1 rad/s, slope: -20 dB/decade # H(s=0.1) ~= 0 dB # H(s=1) ~= -3 dB # H(s=10) ~= -20 dB # H(s=100) ~= -40 dB system = lti([1], [1, 1]) w = [0.1, 1, 10, 100] w, mag, phase = bode(system, w=w) expected_mag = [0, -3, -20, -40] assert_almost_equal(mag, expected_mag, decimal=1) def test_02(self): # Test bode() phase calculation (manual sanity check). # 1st order low-pass filter: H(s) = 1 / (s + 1), # angle(H(s=0.1)) ~= -5.7 deg # angle(H(s=1)) ~= -45 deg # angle(H(s=10)) ~= -84.3 deg system = lti([1], [1, 1]) w = [0.1, 1, 10] w, mag, phase = bode(system, w=w) expected_phase = [-5.7, -45, -84.3] assert_almost_equal(phase, expected_phase, decimal=1) def test_03(self): # Test bode() magnitude calculation. # 1st order low-pass filter: H(s) = 1 / (s + 1) system = lti([1], [1, 1]) w = [0.1, 1, 10, 100] w, mag, phase = bode(system, w=w) jw = w * 1j y = np.polyval(system.num, jw) / np.polyval(system.den, jw) expected_mag = 20.0 * np.log10(abs(y)) assert_almost_equal(mag, expected_mag) def test_04(self): # Test bode() phase calculation. # 1st order low-pass filter: H(s) = 1 / (s + 1) system = lti([1], [1, 1]) w = [0.1, 1, 10, 100] w, mag, phase = bode(system, w=w) jw = w * 1j y = np.polyval(system.num, jw) / np.polyval(system.den, jw) expected_phase = np.arctan2(y.imag, y.real) * 180.0 / np.pi assert_almost_equal(phase, expected_phase) def test_05(self): # Test that bode() finds a reasonable frequency range. # 1st order low-pass filter: H(s) = 1 / (s + 1) system = lti([1], [1, 1]) n = 10 # Expected range is from 0.01 to 10. expected_w = np.logspace(-2, 1, n) w, mag, phase = bode(system, n=n) assert_almost_equal(w, expected_w) def test_06(self): # Test that bode() doesn't fail on a system with a pole at 0. # integrator, pole at zero: H(s) = 1 / s system = lti([1], [1, 0]) w, mag, phase = bode(system, n=2) assert_equal(w[0], 0.01) # a fail would give not-a-number def test_07(self): # bode() should not fail on a system with pure imaginary poles. 
# The test passes if bode doesn't raise an exception. system = lti([1], [1, 0, 100]) w, mag, phase = bode(system, n=2) def test_08(self): # Test that bode() return continuous phase, issues/2331. system = lti([], [-10, -30, -40, -60, -70], 1) w, mag, phase = system.bode(w=np.logspace(-3, 40, 100)) assert_almost_equal(min(phase), -450, decimal=15) def test_from_state_space(self): # Ensure that bode works with a system that was created from the # state space representation matrices A, B, C, D. In this case, # system.num will be a 2-D array with shape (1, n+1), where (n,n) # is the shape of A. # A Butterworth lowpass filter is used, so we know the exact # frequency response. a = np.array([1.0, 2.0, 2.0, 1.0]) A = linalg.companion(a).T B = np.array([[0.0], [0.0], [1.0]]) C = np.array([[1.0, 0.0, 0.0]]) D = np.array([[0.0]]) with suppress_warnings() as sup: sup.filter(BadCoefficients) system = lti(A, B, C, D) w, mag, phase = bode(system, n=100) expected_magnitude = 20 * np.log10(np.sqrt(1.0 / (1.0 + w**6))) assert_almost_equal(mag, expected_magnitude) class Test_freqresp: def test_output_manual(self): # Test freqresp() output calculation (manual sanity check). # 1st order low-pass filter: H(s) = 1 / (s + 1), # re(H(s=0.1)) ~= 0.99 # re(H(s=1)) ~= 0.5 # re(H(s=10)) ~= 0.0099 system = lti([1], [1, 1]) w = [0.1, 1, 10] w, H = freqresp(system, w=w) expected_re = [0.99, 0.5, 0.0099] expected_im = [-0.099, -0.5, -0.099] assert_almost_equal(H.real, expected_re, decimal=1) assert_almost_equal(H.imag, expected_im, decimal=1) def test_output(self): # Test freqresp() output calculation. # 1st order low-pass filter: H(s) = 1 / (s + 1) system = lti([1], [1, 1]) w = [0.1, 1, 10, 100] w, H = freqresp(system, w=w) s = w * 1j expected = np.polyval(system.num, s) / np.polyval(system.den, s) assert_almost_equal(H.real, expected.real) assert_almost_equal(H.imag, expected.imag) def test_freq_range(self): # Test that freqresp() finds a reasonable frequency range. # 1st order low-pass filter: H(s) = 1 / (s + 1) # Expected range is from 0.01 to 10. system = lti([1], [1, 1]) n = 10 expected_w = np.logspace(-2, 1, n) w, H = freqresp(system, n=n) assert_almost_equal(w, expected_w) def test_pole_zero(self): # Test that freqresp() doesn't fail on a system with a pole at 0. # integrator, pole at zero: H(s) = 1 / s system = lti([1], [1, 0]) w, H = freqresp(system, n=2) assert_equal(w[0], 0.01) # a fail would give not-a-number def test_from_state_space(self): # Ensure that freqresp works with a system that was created from the # state space representation matrices A, B, C, D. In this case, # system.num will be a 2-D array with shape (1, n+1), where (n,n) is # the shape of A. # A Butterworth lowpass filter is used, so we know the exact # frequency response. a = np.array([1.0, 2.0, 2.0, 1.0]) A = linalg.companion(a).T B = np.array([[0.0],[0.0],[1.0]]) C = np.array([[1.0, 0.0, 0.0]]) D = np.array([[0.0]]) with suppress_warnings() as sup: sup.filter(BadCoefficients) system = lti(A, B, C, D) w, H = freqresp(system, n=100) s = w * 1j expected = (1.0 / (1.0 + 2*s + 2*s**2 + s**3)) assert_almost_equal(H.real, expected.real) assert_almost_equal(H.imag, expected.imag) def test_from_zpk(self): # 4th order low-pass filter: H(s) = 1 / (s + 1) system = lti([],[-1]*4,[1]) w = [0.1, 1, 10, 100] w, H = freqresp(system, w=w) s = w * 1j expected = 1 / (s + 1)**4 assert_almost_equal(H.real, expected.real) assert_almost_equal(H.imag, expected.imag)
48,251
35.861727
79
py
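The TestPlacePoles helper in the file above validates a computed gain matrix by re-deriving the closed-loop poles from A - B @ K and comparing them with the requested ones. A minimal sketch of the same check on a double integrator (illustrative only; the system matrices and pole locations are arbitrary assumptions):

import numpy as np
from scipy.signal import place_poles

A = np.array([[0., 1.], [0., 0.]])      # double integrator
B = np.array([[0.], [1.]])
requested = np.array([-2., -3.])

fsf = place_poles(A, B, requested)
closed_loop = np.linalg.eigvals(A - B @ fsf.gain_matrix)
assert np.allclose(np.sort(closed_loop), np.sort(requested))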
scipy
scipy-main/scipy/signal/tests/test_czt.py
# This program is public domain # Authors: Paul Kienzle, Nadav Horesh ''' A unit test module for czt.py ''' import pytest from numpy.testing import assert_allclose from scipy.fft import fft from scipy.signal import (czt, zoom_fft, czt_points, CZT, ZoomFFT) import numpy as np def check_czt(x): # Check that czt is the equivalent of normal fft y = fft(x) y1 = czt(x) assert_allclose(y1, y, rtol=1e-13) # Check that interpolated czt is the equivalent of normal fft y = fft(x, 100*len(x)) y1 = czt(x, 100*len(x)) assert_allclose(y1, y, rtol=1e-12) def check_zoom_fft(x): # Check that zoom_fft is the equivalent of normal fft y = fft(x) y1 = zoom_fft(x, [0, 2-2./len(y)], endpoint=True) assert_allclose(y1, y, rtol=1e-11, atol=1e-14) y1 = zoom_fft(x, [0, 2]) assert_allclose(y1, y, rtol=1e-11, atol=1e-14) # Test fn scalar y1 = zoom_fft(x, 2-2./len(y), endpoint=True) assert_allclose(y1, y, rtol=1e-11, atol=1e-14) y1 = zoom_fft(x, 2) assert_allclose(y1, y, rtol=1e-11, atol=1e-14) # Check that zoom_fft with oversampling is equivalent to zero padding over = 10 yover = fft(x, over*len(x)) y2 = zoom_fft(x, [0, 2-2./len(yover)], m=len(yover), endpoint=True) assert_allclose(y2, yover, rtol=1e-12, atol=1e-10) y2 = zoom_fft(x, [0, 2], m=len(yover)) assert_allclose(y2, yover, rtol=1e-12, atol=1e-10) # Check that zoom_fft works on a subrange w = np.linspace(0, 2-2./len(x), len(x)) f1, f2 = w[3], w[6] y3 = zoom_fft(x, [f1, f2], m=3*over+1, endpoint=True) idx3 = slice(3*over, 6*over+1) assert_allclose(y3, yover[idx3], rtol=1e-13) def test_1D(): # Test of 1D version of the transforms np.random.seed(0) # Deterministic randomness # Random signals lengths = np.random.randint(8, 200, 20) np.append(lengths, 1) for length in lengths: x = np.random.random(length) check_zoom_fft(x) check_czt(x) # Gauss t = np.linspace(-2, 2, 128) x = np.exp(-t**2/0.01) check_zoom_fft(x) # Linear x = [1, 2, 3, 4, 5, 6, 7] check_zoom_fft(x) # Check near powers of two check_zoom_fft(range(126-31)) check_zoom_fft(range(127-31)) check_zoom_fft(range(128-31)) check_zoom_fft(range(129-31)) check_zoom_fft(range(130-31)) # Check transform on n-D array input x = np.reshape(np.arange(3*2*28), (3, 2, 28)) y1 = zoom_fft(x, [0, 2-2./28]) y2 = zoom_fft(x[2, 0, :], [0, 2-2./28]) assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12) y1 = zoom_fft(x, [0, 2], endpoint=False) y2 = zoom_fft(x[2, 0, :], [0, 2], endpoint=False) assert_allclose(y1[2, 0], y2, rtol=1e-13, atol=1e-12) # Random (not a test condition) x = np.random.rand(101) check_zoom_fft(x) # Spikes t = np.linspace(0, 1, 128) x = np.sin(2*np.pi*t*5)+np.sin(2*np.pi*t*13) check_zoom_fft(x) # Sines x = np.zeros(100, dtype=complex) x[[1, 5, 21]] = 1 check_zoom_fft(x) # Sines plus complex component x += 1j*np.linspace(0, 0.5, x.shape[0]) check_zoom_fft(x) def test_large_prime_lengths(): np.random.seed(0) # Deterministic randomness for N in (101, 1009, 10007): x = np.random.rand(N) y = fft(x) y1 = czt(x) assert_allclose(y, y1, rtol=1e-12) @pytest.mark.slow def test_czt_vs_fft(): np.random.seed(123) random_lengths = np.random.exponential(100000, size=10).astype('int') for n in random_lengths: a = np.random.randn(n) assert_allclose(czt(a), fft(a), rtol=1e-11) def test_empty_input(): with pytest.raises(ValueError, match='Invalid number of CZT'): czt([]) with pytest.raises(ValueError, match='Invalid number of CZT'): zoom_fft([], 0.5) def test_0_rank_input(): with pytest.raises(IndexError, match='tuple index out of range'): czt(5) with pytest.raises(IndexError, match='tuple index out of range'): zoom_fft(5, 0.5) 
@pytest.mark.parametrize('impulse', ([0, 0, 1], [0, 0, 1, 0, 0], np.concatenate((np.array([0, 0, 1]), np.zeros(100))))) @pytest.mark.parametrize('m', (1, 3, 5, 8, 101, 1021)) @pytest.mark.parametrize('a', (1, 2, 0.5, 1.1)) # Step that tests away from the unit circle, but not so far it explodes from # numerical error @pytest.mark.parametrize('w', (None, 0.98534 + 0.17055j)) def test_czt_math(impulse, m, w, a): # z-transform of an impulse is 1 everywhere assert_allclose(czt(impulse[2:], m=m, w=w, a=a), np.ones(m), rtol=1e-10) # z-transform of a delayed impulse is z**-1 assert_allclose(czt(impulse[1:], m=m, w=w, a=a), czt_points(m=m, w=w, a=a)**-1, rtol=1e-10) # z-transform of a 2-delayed impulse is z**-2 assert_allclose(czt(impulse, m=m, w=w, a=a), czt_points(m=m, w=w, a=a)**-2, rtol=1e-10) def test_int_args(): # Integer argument `a` was producing all 0s assert_allclose(abs(czt([0, 1], m=10, a=2)), 0.5*np.ones(10), rtol=1e-15) assert_allclose(czt_points(11, w=2), 1/(2**np.arange(11)), rtol=1e-30) def test_czt_points(): for N in (1, 2, 3, 8, 11, 100, 101, 10007): assert_allclose(czt_points(N), np.exp(2j*np.pi*np.arange(N)/N), rtol=1e-30) assert_allclose(czt_points(7, w=1), np.ones(7), rtol=1e-30) assert_allclose(czt_points(11, w=2.), 1/(2**np.arange(11)), rtol=1e-30) func = CZT(12, m=11, w=2., a=1) assert_allclose(func.points(), 1/(2**np.arange(11)), rtol=1e-30) @pytest.mark.parametrize('cls, args', [(CZT, (100,)), (ZoomFFT, (100, 0.2))]) def test_CZT_size_mismatch(cls, args): # Data size doesn't match function's expected size myfunc = cls(*args) with pytest.raises(ValueError, match='CZT defined for'): myfunc(np.arange(5)) def test_invalid_range(): with pytest.raises(ValueError, match='2-length sequence'): ZoomFFT(100, [1, 2, 3]) @pytest.mark.parametrize('m', [0, -11, 5.5, 4.0]) def test_czt_points_errors(m): # Invalid number of points with pytest.raises(ValueError, match='Invalid number of CZT'): czt_points(m) @pytest.mark.parametrize('size', [0, -5, 3.5, 4.0]) def test_nonsense_size(size): # Numpy and Scipy fft() give ValueError for 0 output size, so we do, too with pytest.raises(ValueError, match='Invalid number of CZT'): CZT(size, 3) with pytest.raises(ValueError, match='Invalid number of CZT'): ZoomFFT(size, 0.2, 3) with pytest.raises(ValueError, match='Invalid number of CZT'): CZT(3, size) with pytest.raises(ValueError, match='Invalid number of CZT'): ZoomFFT(3, 0.2, size) with pytest.raises(ValueError, match='Invalid number of CZT'): czt([1, 2, 3], size) with pytest.raises(ValueError, match='Invalid number of CZT'): zoom_fft([1, 2, 3], 0.2, size)
6,993
30.790909
77
py
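The check_czt helper in the file above relies on the chirp z-transform reducing to the ordinary DFT for its default parameters, evaluated at the m-th roots of unity. A minimal sketch of that identity (illustrative only; the signal length is arbitrary):

import numpy as np
from scipy.fft import fft
from scipy.signal import czt, czt_points

x = np.random.default_rng(0).standard_normal(11)
np.testing.assert_allclose(czt(x), fft(x), rtol=1e-12)   # default czt equals fft

m = 11
np.testing.assert_allclose(czt_points(m),                 # default evaluation points
                           np.exp(2j * np.pi * np.arange(m) / m))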
scipy
scipy-main/scipy/signal/tests/test_short_time_fft.py
"""Unit tests for module `_short_time_fft`. This file's structure loosely groups the tests into the following sequential categories: 1. Test function `_calc_dual_canonical_window`. 2. Test for invalid parameters and exceptions in `ShortTimeFFT` (until the `test_from_window` function). 3. Test algorithmic properties of STFT/ISTFT. Some tests were ported from ``test_spectral.py``. Notes ----- * Mypy 0.990 does interpret the line:: from scipy.stats import norm as normal_distribution incorrectly (but the code works), hence a ``type: ignore`` was appended. """ import math from itertools import product from typing import cast, get_args, Literal import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal from scipy.fft import fftshift from scipy.stats import norm as normal_distribution # type: ignore from scipy.signal import get_window, welch, stft, istft, spectrogram from scipy.signal._short_time_fft import FFT_MODE_TYPE, \ _calc_dual_canonical_window, ShortTimeFFT, PAD_TYPE from scipy.signal.windows import gaussian def test__calc_dual_canonical_window_roundtrip(): """Test dual window calculation with a round trip to verify duality. Note that this works only for canonical window pairs (having minimal energy) like a Gaussian. The window is the same as in the example of `from ShortTimeFFT.from_dual`. """ win = gaussian(51, std=10, sym=True) d_win = _calc_dual_canonical_window(win, 10) win2 = _calc_dual_canonical_window(d_win, 10) assert_allclose(win2, win) def test__calc_dual_canonical_window_exceptions(): """Raise all exceptions in `_calc_dual_canonical_window`.""" # Verify that calculation can fail: with pytest.raises(ValueError, match="hop=5 is larger than window len.*"): _calc_dual_canonical_window(np.ones(4), 5) with pytest.raises(ValueError, match=".* Transform not invertible!"): _calc_dual_canonical_window(np.array([.1, .2, .3, 0]), 4) # Verify that parameter `win` may not be integers: with pytest.raises(ValueError, match="Parameter 'win' cannot be of int.*"): _calc_dual_canonical_window(np.ones(4, dtype=int), 1) def test_invalid_initializer_parameters(): """Verify that exceptions get raised on invalid parameters when instantiating ShortTimeFFT. 
""" with pytest.raises(ValueError, match=r"Parameter win must be 1d, " + r"but win.shape=\(2, 2\)!"): ShortTimeFFT(np.ones((2, 2)), hop=4, fs=1) with pytest.raises(ValueError, match="Parameter win must have " + "finite entries"): ShortTimeFFT(np.array([1, np.inf, 2, 3]), hop=4, fs=1) with pytest.raises(ValueError, match="Parameter hop=0 is not " + "an integer >= 1!"): ShortTimeFFT(np.ones(4), hop=0, fs=1) with pytest.raises(ValueError, match="Parameter hop=2.0 is not " + "an integer >= 1!"): # noinspection PyTypeChecker ShortTimeFFT(np.ones(4), hop=2.0, fs=1) with pytest.raises(ValueError, match=r"dual_win.shape=\(5,\) must equal " + r"win.shape=\(4,\)!"): ShortTimeFFT(np.ones(4), hop=2, fs=1, dual_win=np.ones(5)) with pytest.raises(ValueError, match="Parameter dual_win must be " + "a finite array!"): ShortTimeFFT(np.ones(3), hop=2, fs=1, dual_win=np.array([np.nan, 2, 3])) def test_exceptions_properties_methods(): """Verify that exceptions get raised when setting properties or calling method of ShortTimeFFT to/with invalid values.""" SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) with pytest.raises(ValueError, match="Sampling interval T=-1 must be " + "positive!"): SFT.T = -1 with pytest.raises(ValueError, match="Sampling frequency fs=-1 must be " + "positive!"): SFT.fs = -1 with pytest.raises(ValueError, match="fft_mode='invalid_typ' not in " + r"\('twosided', 'centered', " + r"'onesided', 'onesided2X'\)!"): SFT.fft_mode = 'invalid_typ' with pytest.raises(ValueError, match="For scaling is None, " + "fft_mode='onesided2X' is invalid.*"): SFT.fft_mode = 'onesided2X' with pytest.raises(ValueError, match="Attribute mfft=7 needs to be " + "at least the window length.*"): SFT.mfft = 7 with pytest.raises(ValueError, match="scaling='invalid' not in.*"): # noinspection PyTypeChecker SFT.scale_to('invalid') with pytest.raises(ValueError, match="phase_shift=3.0 has the unit .*"): SFT.phase_shift = 3.0 with pytest.raises(ValueError, match="-mfft < phase_shift < mfft " + "does not hold.*"): SFT.phase_shift = 2*SFT.mfft with pytest.raises(ValueError, match="Parameter padding='invalid' not.*"): # noinspection PyTypeChecker g = SFT._x_slices(np.zeros(16), k_off=0, p0=0, p1=1, padding='invalid') next(g) # execute generator with pytest.raises(ValueError, match="Trend type must be 'linear' " + "or 'constant'"): # noinspection PyTypeChecker SFT.stft_detrend(np.zeros(16), detr='invalid') with pytest.raises(ValueError, match="Parameter detr=nan is not a str, " + "function or None!"): # noinspection PyTypeChecker SFT.stft_detrend(np.zeros(16), detr=np.nan) with pytest.raises(ValueError, match="Invalid Parameter p0=0, p1=200.*"): SFT.p_range(100, 0, 200) with pytest.raises(ValueError, match="f_axis=0 may not be equal to " + "t_axis=0!"): SFT.istft(np.zeros((SFT.f_pts, 2)), t_axis=0, f_axis=0) with pytest.raises(ValueError, match=r"S.shape\[f_axis\]=2 must be equal" + " to self.f_pts=5.*"): SFT.istft(np.zeros((2, 2))) with pytest.raises(ValueError, match=r"S.shape\[t_axis\]=1 needs to have" + " at least 2 slices.*"): SFT.istft(np.zeros((SFT.f_pts, 1))) with pytest.raises(ValueError, match=r".*\(k1=100\) <= \(k_max=12\) " + "is false!$"): SFT.istft(np.zeros((SFT.f_pts, 3)), k1=100) with pytest.raises(ValueError, match=r"\(k1=1\) - \(k0=0\) = 1 has to " + "be at least.* length 4!"): SFT.istft(np.zeros((SFT.f_pts, 3)), k0=0, k1=1) with pytest.raises(ValueError, match=r"Parameter axes_seq='invalid' " + r"not in \['tf', 'ft'\]!"): # noinspection PyTypeChecker SFT.extent(n=100, axes_seq='invalid') with 
pytest.raises(ValueError, match="Attribute fft_mode=twosided must.*"): SFT.fft_mode = 'twosided' SFT.extent(n=100) @pytest.mark.parametrize('m', ('onesided', 'onesided2X')) def test_exceptions_fft_mode_complex_win(m: FFT_MODE_TYPE): """Verify that one-sided spectra are not allowed with complex-valued windows. The reason is that the `rfft` function only accepts real-valued input. """ with pytest.raises(ValueError, match=f"One-sided spectra, i.e., fft_mode='{m}'.*"): ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode=m) SFT = ShortTimeFFT(np.ones(8)*1j, hop=4, fs=1, fft_mode='twosided') with pytest.raises(ValueError, match=f"One-sided spectra, i.e., fft_mode='{m}'.*"): SFT.fft_mode = m def test_invalid_fft_mode_RuntimeError(): """Ensure exception gets raised when property `fft_mode` is invalid. """ SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) SFT._fft_mode = 'invalid_typ' with pytest.raises(RuntimeError): _ = SFT.f with pytest.raises(RuntimeError): SFT._fft_func(np.ones(8)) with pytest.raises(RuntimeError): SFT._ifft_func(np.ones(8)) @pytest.mark.parametrize('win_params, Nx', [(('gaussian', 2.), 9), # in docstr ('triang', 7), (('kaiser', 4.0), 9), (('exponential', None, 1.), 9), (4.0, 9)]) def test_from_window(win_params, Nx: int): """Verify that `from_window()` handles parameters correctly. The window parameterizations are documented in the `get_window` docstring. """ w_sym, fs = get_window(win_params, Nx, fftbins=False), 16. w_per = get_window(win_params, Nx, fftbins=True) SFT0 = ShortTimeFFT(w_sym, hop=3, fs=fs, fft_mode='twosided', scale_to='psd', phase_shift=1) nperseg = len(w_sym) noverlap = nperseg - SFT0.hop SFT1 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap, symmetric_win=True, fft_mode='twosided', scale_to='psd', phase_shift=1) # periodic window: SFT2 = ShortTimeFFT.from_window(win_params, fs, nperseg, noverlap, symmetric_win=False, fft_mode='twosided', scale_to='psd', phase_shift=1) # Be informative when comparing instances: assert_equal(SFT1.win, SFT0.win) assert_allclose(SFT2.win, w_per / np.sqrt(sum(w_per**2) * fs)) for n_ in ('hop', 'T', 'fft_mode', 'mfft', 'scaling', 'phase_shift'): v0, v1, v2 = (getattr(SFT_, n_) for SFT_ in (SFT0, SFT1, SFT2)) assert v1 == v0, f"SFT1.{n_}={v1} does not equal SFT0.{n_}={v0}" assert v2 == v0, f"SFT2.{n_}={v2} does not equal SFT0.{n_}={v0}" def test_dual_win_roundtrip(): """Verify the duality of `win` and `dual_win`. Note that this test does not work for arbitrary windows, since dual windows are not unique. It always works for invertible STFTs if the windows do not overlap. """ # Non-standard values for keyword arguments (except for `scale_to`): kw = dict(hop=4, fs=1, fft_mode='twosided', mfft=8, scale_to=None, phase_shift=2) SFT0 = ShortTimeFFT(np.ones(4), **kw) SFT1 = ShortTimeFFT.from_dual(SFT0.dual_win, **kw) assert_allclose(SFT1.dual_win, SFT0.win) @pytest.mark.parametrize('scale_to, fac_psd, fac_mag', [(None, 0.25, 0.125), ('magnitude', 2.0, 1), ('psd', 1, 0.5)]) def test_scaling(scale_to: Literal['magnitude', 'psd'], fac_psd, fac_mag): """Verify scaling calculations. * Verify passing the `scale_to` parameter to ``__init__()``. * Roundtrip while changing scaling factor. 
""" SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=scale_to) assert SFT.fac_psd == fac_psd assert SFT.fac_magnitude == fac_mag # increase coverage by accessing properties twice: assert SFT.fac_psd == fac_psd assert SFT.fac_magnitude == fac_mag x = np.fft.irfft([0, 0, 7, 0, 0, 0, 0]) # periodic signal Sx = SFT.stft(x) Sx_mag, Sx_psd = Sx * SFT.fac_magnitude, Sx * SFT.fac_psd SFT.scale_to('magnitude') x_mag = SFT.istft(Sx_mag, k1=len(x)) assert_allclose(x_mag, x) SFT.scale_to('psd') x_psd = SFT.istft(Sx_psd, k1=len(x)) assert_allclose(x_psd, x) def test_scale_to(): """Verify `scale_to()` method.""" SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None) SFT.scale_to('magnitude') assert SFT.scaling == 'magnitude' assert SFT.fac_psd == 2.0 assert SFT.fac_magnitude == 1 SFT.scale_to('psd') assert SFT.scaling == 'psd' assert SFT.fac_psd == 1 assert SFT.fac_magnitude == 0.5 SFT.scale_to('psd') # needed for coverage for scale, s_fac in zip(('magnitude', 'psd'), (8, 4)): SFT = ShortTimeFFT(np.ones(4) * 2, hop=4, fs=1, scale_to=None) dual_win = SFT.dual_win.copy() SFT.scale_to(cast(Literal['magnitude', 'psd'], scale)) assert_allclose(SFT.dual_win, dual_win * s_fac) def test_x_slices_padding(): """Verify padding. The reference arrays were taken from the docstrings of `zero_ext`, `const_ext`, `odd_ext()`, and `even_ext()` from the _array_tools module. """ SFT = ShortTimeFFT(np.ones(5), hop=4, fs=1) x = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]], dtype=float) d = {'zeros': [[[0, 0, 1, 2, 3], [0, 0, 0, 1, 4]], [[3, 4, 5, 0, 0], [4, 9, 16, 0, 0]]], 'edge': [[[1, 1, 1, 2, 3], [0, 0, 0, 1, 4]], [[3, 4, 5, 5, 5], [4, 9, 16, 16, 16]]], 'even': [[[3, 2, 1, 2, 3], [4, 1, 0, 1, 4]], [[3, 4, 5, 4, 3], [4, 9, 16, 9, 4]]], 'odd': [[[-1, 0, 1, 2, 3], [-4, -1, 0, 1, 4]], [[3, 4, 5, 6, 7], [4, 9, 16, 23, 28]]]} for p_, xx in d.items(): gen = SFT._x_slices(np.array(x), 0, 0, 2, padding=cast(PAD_TYPE, p_)) yy = np.array([y_.copy() for y_ in gen]) # due to inplace copying assert_equal(yy, xx, err_msg=f"Failed '{p_}' padding.") def test_invertible(): """Verify `invertible` property. """ SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) assert SFT.invertible SFT = ShortTimeFFT(np.ones(8), hop=9, fs=1) assert not SFT.invertible def test_border_values(): """Ensure that minimum and maximum values of slices are correct.""" SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) assert SFT.p_min == 0 assert SFT.k_min == -4 assert SFT.lower_border_end == (4, 1) assert SFT.lower_border_end == (4, 1) # needed to test caching assert SFT.p_max(10) == 4 assert SFT.k_max(10) == 16 assert SFT.upper_border_begin(10) == (4, 2) def test_border_values_exotic(): """Ensure that the border calculations are correct for windows with zeros. """ w = np.array([0, 0, 0, 0, 0, 0, 0, 1.]) SFT = ShortTimeFFT(w, hop=1, fs=1) assert SFT.lower_border_end == (0, 0) SFT = ShortTimeFFT(np.flip(w), hop=20, fs=1) assert SFT.upper_border_begin(4) == (0, 0) SFT._hop = -1 # provoke unreachable line with pytest.raises(RuntimeError): _ = SFT.k_max(4) with pytest.raises(RuntimeError): _ = SFT.k_min def test_t(): """Verify that the times of the slices are correct. """ SFT = ShortTimeFFT(np.ones(8), hop=4, fs=2) assert SFT.T == 1/2 assert SFT.fs == 2. 
assert SFT.delta_t == 4 * 1/2 t_stft = np.arange(0, SFT.p_max(10)) * SFT.delta_t assert_equal(SFT.t(10), t_stft) assert_equal(SFT.t(10, 1, 3), t_stft[1:3]) SFT.T = 1/4 assert SFT.T == 1/4 assert SFT.fs == 4 SFT.fs = 1/8 assert SFT.fs == 1/8 assert SFT.T == 8 @pytest.mark.parametrize('fft_mode, f', [('onesided', [0., 1., 2.]), ('onesided2X', [0., 1., 2.]), ('twosided', [0., 1., 2., -2., -1.]), ('centered', [-2., -1., 0., 1., 2.])]) def test_f(fft_mode: FFT_MODE_TYPE, f): """Verify the frequency values property `f`.""" SFT = ShortTimeFFT(np.ones(5), hop=4, fs=5, fft_mode=fft_mode, scale_to='psd') assert_equal(SFT.f, f) def test_extent(): """Ensure that the `extent()` method is correct. """ SFT = ShortTimeFFT(np.ones(32), hop=4, fs=32, fft_mode='onesided') assert SFT.extent(100, 'tf', False) == (-0.375, 3.625, 0.0, 17.0) assert SFT.extent(100, 'ft', False) == (0.0, 17.0, -0.375, 3.625) assert SFT.extent(100, 'tf', True) == (-0.4375, 3.5625, -0.5, 16.5) assert SFT.extent(100, 'ft', True) == (-0.5, 16.5, -0.4375, 3.5625) SFT = ShortTimeFFT(np.ones(32), hop=4, fs=32, fft_mode='centered') assert SFT.extent(100, 'tf', False) == (-0.375, 3.625, -16.0, 15.0) def test_spectrogram(): """Verify spectrogram and cross-spectrogram methods. """ SFT = ShortTimeFFT(np.ones(8), hop=4, fs=1) x, y = np.ones(10), np.arange(10) X, Y = SFT.stft(x), SFT.stft(y) assert_allclose(SFT.spectrogram(x), X.real**2+X.imag**2) assert_allclose(SFT.spectrogram(x, y), X * Y.conj()) @pytest.mark.parametrize('n', [8, 9]) def test_fft_func_roundtrip(n: int): """Test roundtrip `ifft_func(fft_func(x)) == x` for all permutations of relevant parameters. """ np.random.seed(2394795) x0 = np.random.rand(n) w, h_n = np.ones(n), 4 pp = dict( fft_mode=get_args(FFT_MODE_TYPE), mfft=[None, n, n+1, n+2], scaling=[None, 'magnitude', 'psd'], phase_shift=[None, -n+1, 0, n // 2, n-1]) for f_typ, mfft, scaling, phase_shift in product(*pp.values()): if f_typ == 'onesided2X' and scaling is None: continue # this combination is forbidden SFT = ShortTimeFFT(w, h_n, fs=n, fft_mode=f_typ, mfft=mfft, scale_to=scaling, phase_shift=phase_shift) X0 = SFT._fft_func(x0) x1 = SFT._ifft_func(X0) assert_allclose(x0, x1, err_msg="_fft_func() roundtrip failed for " + f"{f_typ=}, {mfft=}, {scaling=}, {phase_shift=}") SFT = ShortTimeFFT(w, h_n, fs=1) SFT._fft_mode = 'invalid_fft' # type: ignore with pytest.raises(RuntimeError): SFT._fft_func(x0) with pytest.raises(RuntimeError): SFT._ifft_func(x0) @pytest.mark.parametrize('i', range(19)) def test_impulse_roundtrip(i): """Roundtrip for an impulse being at different positions `i`.""" n = 19 w, h_n = np.ones(8), 3 x = np.zeros(n) x[i] = 1 SFT = ShortTimeFFT(w, hop=h_n, fs=1, scale_to=None, phase_shift=None) Sx = SFT.stft(x) # test slicing the input signal into two parts: n_q = SFT.nearest_k_p(n // 2) Sx0 = SFT.stft(x[:n_q], padding='zeros') Sx1 = SFT.stft(x[n_q:], padding='zeros') q0_ub = SFT.upper_border_begin(n_q)[1] - SFT.p_min q1_le = SFT.lower_border_end[1] - SFT.p_min assert_allclose(Sx0[:, :q0_ub], Sx[:, :q0_ub], err_msg=f"{i=}") assert_allclose(Sx1[:, q1_le:], Sx[:, q1_le-Sx1.shape[1]:], err_msg=f"{i=}") Sx01 = np.hstack((Sx0[:, :q0_ub], Sx0[:, q0_ub:] + Sx1[:, :q1_le], Sx1[:, q1_le:])) assert_allclose(Sx, Sx01, atol=1e-8, err_msg=f"{i=}") y = SFT.istft(Sx, 0, n) assert_allclose(y, x, atol=1e-8, err_msg=f"{i=}") y0 = SFT.istft(Sx, 0, n//2) assert_allclose(x[:n//2], y0, atol=1e-8, err_msg=f"{i=}") y1 = SFT.istft(Sx, n // 2, n) assert_allclose(x[n // 2:], y1, atol=1e-8, err_msg=f"{i=}") 
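# --- Illustrative sketch (added for exposition, not part of the original
# test suite). It condenses the roundtrip idea exercised by
# `test_impulse_roundtrip` above into a standalone snippet: an impulse
# transformed with `stft()` and inverted with `istft()` should be recovered
# up to floating-point error. Window length, hop and impulse position are
# arbitrary illustrative choices.
def _impulse_roundtrip_sketch():
    w = np.ones(8)                      # rectangular analysis window
    SFT = ShortTimeFFT(w, hop=3, fs=1)  # hop < len(w) keeps the STFT invertible
    x = np.zeros(19)
    x[7] = 1                            # impulse somewhere inside the signal
    Sx = SFT.stft(x)
    x_rec = SFT.istft(Sx, k1=len(x))    # k1 truncates to the original length
    assert np.allclose(x_rec, x, atol=1e-8)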
@pytest.mark.parametrize('hop', [1, 7, 8]) def test_asymmetric_window_roundtrip(hop: int): """An asymmetric window could uncover indexing problems. """ np.random.seed(23371) w = np.arange(16) / 8 # must be of type float w[len(w)//2:] = 1 SFT = ShortTimeFFT(w, hop, fs=1) x = 10 * np.random.randn(64) Sx = SFT.stft(x) x1 = SFT.istft(Sx, k1=len(x)) assert_allclose(x1, x, err_msg="Roundtrip for asymmetric window with " + f"{hop=} failed!") @pytest.mark.parametrize('m_num', [6, 7]) def test_minimal_length_signal(m_num): """Verify that the shortest allowed signal works. """ SFT = ShortTimeFFT(np.ones(m_num), m_num//2, fs=1) n = math.ceil(m_num/2) x = np.ones(n) Sx = SFT.stft(x) x1 = SFT.istft(Sx, k1=n) assert_allclose(x1, x, err_msg=f"Roundtrip minimal length signal ({n=})" + f" for {m_num} sample window failed!") with pytest.raises(ValueError, match=rf"len\(x\)={n-1} must be >= ceil.*"): SFT.stft(x[:-1]) with pytest.raises(ValueError, match=rf"S.shape\[t_axis\]={Sx.shape[1]-1}" f" needs to have at least {Sx.shape[1]} slices"): SFT.istft(Sx[:, :-1], k1=n) def test_tutorial_stft_sliding_win(): """Verify example in "Sliding Windows" subsection from the "User Guide". In :ref:`tutorial_stft_sliding_win` (file ``signal.rst``) of the :ref:`user_guide` the border behavior of ``ShortTimeFFT(np.ones(6), 2, fs=1)`` with a 50 sample signal is discussed. This test verifies the presented indexes. """ SFT = ShortTimeFFT(np.ones(6), 2, fs=1) # Lower border: assert SFT.m_num_mid == 3, f"Slice middle is not 3 but {SFT.m_num_mid=}" assert SFT.p_min == -1, f"Lowest slice {SFT.p_min=} is not -1" assert SFT.k_min == -5, f"Lowest slice sample {SFT.k_min=} is not -5" k_lb, p_lb = SFT.lower_border_end assert p_lb == 2, f"First unaffected slice {p_lb=} is not 2" assert k_lb == 5, f"First unaffected sample {k_lb=} is not 5" n = 50 # upper signal border assert (p_max := SFT.p_max(n)) == 27, f"Last slice {p_max=} must be 27" assert (k_max := SFT.k_max(n)) == 55, f"Last sample {k_max=} must be 55" k_ub, p_ub = SFT.upper_border_begin(n) assert p_ub == 24, f"First upper border slice {p_ub=} must be 24" assert k_ub == 45, f"First upper border sample {k_ub=} must be 45" def test_tutorial_stft_legacy_stft(): """Verify STFT example in "Comparison with Legacy Implementation" from the "User Guide". In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the :ref:`user_guide` the legacy and the new implementation are compared. 
""" fs, N = 200, 1001 # 200 Hz sampling rate for a 5 s signal t_z = np.arange(N) / fs # time indexes for signal z = np.exp(2j*np.pi * 70 * (t_z - 0.2 * t_z ** 2)) # complex-valued chirp nperseg, noverlap = 50, 40 win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard deviation # Legacy STFT: f0_u, t0, Sz0_u = stft(z, fs, win, nperseg, noverlap, return_onesided=False, scaling='spectrum') Sz0 = fftshift(Sz0_u, axes=0) # New STFT: SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap, fft_mode='centered', scale_to='magnitude', phase_shift=None) Sz1 = SFT.stft(z) assert_allclose(Sz0, Sz1[:, 2:-1]) assert_allclose((abs(Sz1[:, 1]).min(), abs(Sz1[:, 1]).max()), (6.925060911593139e-07, 8.00271269218721e-07)) t0_r, z0_r = istft(Sz0_u, fs, win, nperseg, noverlap, input_onesided=False, scaling='spectrum') z1_r = SFT.istft(Sz1, k1=N) assert len(z0_r) == N + 9 assert_allclose(z0_r[:N], z) assert_allclose(z1_r, z) # Spectrogram is just the absolute square of the STFT: assert_allclose(SFT.spectrogram(z), abs(Sz1) ** 2) def test_tutorial_stft_legacy_spectrogram(): """Verify spectrogram example in "Comparison with Legacy Implementation" from the "User Guide". In :ref:`tutorial_stft_legacy_stft` (file ``signal.rst``) of the :ref:`user_guide` the legacy and the new implementation are compared. """ fs, N = 200, 1001 # 200 Hz sampling rate for almost 5 s signal t_z = np.arange(N) / fs # time indexes for signal z = np.exp(2j*np.pi*70 * (t_z - 0.2*t_z**2)) # complex-valued sweep nperseg, noverlap = 50, 40 win = ('gaussian', 1e-2 * fs) # Gaussian with 0.01 s standard dev. # Legacy spectrogram: f2_u, t2, Sz2_u = spectrogram(z, fs, win, nperseg, noverlap, detrend=None, return_onesided=False, scaling='spectrum', mode='complex') f2, Sz2 = fftshift(f2_u), fftshift(Sz2_u, axes=0) # New STFT: SFT = ShortTimeFFT.from_window(win, fs, nperseg, noverlap, fft_mode='centered', scale_to='magnitude', phase_shift=None) Sz3 = SFT.stft(z, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2) t3 = SFT.t(N, p0=0, p1=(N-noverlap) // SFT.hop, k_offset=nperseg // 2) assert_allclose(t2, t3) assert_allclose(f2, SFT.f) assert_allclose(Sz2, Sz3) def test_permute_axes(): """Verify correctness of four-dimensional signal by permuting its shape. 
""" n = 25 SFT = ShortTimeFFT(np.ones(8)/8, hop=3, fs=n) x0 = np.arange(n) Sx0 = SFT.stft(x0) Sx0 = Sx0.reshape((Sx0.shape[0], 1, 1, 1, Sx0.shape[-1])) SxT = np.moveaxis(Sx0, (0, -1), (-1, 0)) atol = 2 * np.finfo(SFT.win.dtype).resolution for i in range(4): y = np.reshape(x0, np.roll((n, 1, 1, 1), i)) Sy = SFT.stft(y, axis=i) assert_allclose(Sy, np.moveaxis(Sx0, 0, i)) yb0 = SFT.istft(Sy, k1=n, f_axis=i) assert_allclose(yb0, y, atol=atol) # explicit t-axis parameter (for coverage): yb1 = SFT.istft(Sy, k1=n, f_axis=i, t_axis=Sy.ndim-1) assert_allclose(yb1, y, atol=atol) SyT = np.moveaxis(Sy, (i, -1), (-1, i)) assert_allclose(SyT, np.moveaxis(SxT, 0, i)) ybT = SFT.istft(SyT, k1=n, t_axis=i, f_axis=-1) assert_allclose(ybT, y, atol=atol) @pytest.mark.parametrize('window, n, nperseg, noverlap', [('boxcar', 100, 10, 0), # Test no overlap ('boxcar', 100, 10, 9), # Test high overlap ('bartlett', 101, 51, 26), # Test odd nperseg ('hann', 1024, 256, 128), # Test defaults (('tukey', 0.5), 1152, 256, 64), # Test Tukey ('hann', 1024, 256, 255), # Test overlapped hann ('boxcar', 100, 10, 3), # NOLA True, COLA False ('bartlett', 101, 51, 37), # NOLA True, COLA False ('hann', 1024, 256, 127), # NOLA True, COLA False # NOLA True, COLA False: (('tukey', 0.5), 1152, 256, 14), ('hann', 1024, 256, 5)]) # NOLA True, COLA False def test_roundtrip_windows(window, n: int, nperseg: int, noverlap: int): """Roundtrip test adapted from `test_spectral.TestSTFT`. The parameters are taken from the methods test_roundtrip_real(), test_roundtrip_nola_not_cola(), test_roundtrip_float32(), test_roundtrip_complex(). """ np.random.seed(2394655) w = get_window(window, nperseg) SFT = ShortTimeFFT(w, nperseg - noverlap, fs=1, fft_mode='twosided', phase_shift=None) z = 10 * np.random.randn(n) + 10j * np.random.randn(n) Sz = SFT.stft(z) z1 = SFT.istft(Sz, k1=len(z)) assert_allclose(z, z1, err_msg="Roundtrip for complex values failed") x = 10 * np.random.randn(n) Sx = SFT.stft(x) x1 = SFT.istft(Sx, k1=len(z)) assert_allclose(x, x1, err_msg="Roundtrip for float values failed") x32 = x.astype(np.float32) Sx32 = SFT.stft(x32) x32_1 = SFT.istft(Sx32, k1=len(x32)) assert_allclose(x32, x32_1, err_msg="Roundtrip for 32 Bit float values failed") @pytest.mark.parametrize('signal_type', ('real', 'complex')) def test_roundtrip_complex_window(signal_type): """Test roundtrip for complex-valued window function The purpose of this test is to check if the dual window is calculated correctly for complex-valued windows. """ np.random.seed(1354654) win = np.exp(2j*np.linspace(0, np.pi, 8)) SFT = ShortTimeFFT(win, 3, fs=1, fft_mode='twosided') z = 10 * np.random.randn(11) if signal_type == 'complex': z = z + 2j * z Sz = SFT.stft(z) z1 = SFT.istft(Sz, k1=len(z)) assert_allclose(z, z1, err_msg="Roundtrip for complex-valued window failed") def test_average_all_segments(): """Compare `welch` function with stft mean. Ported from `TestSpectrogram.test_average_all_segments` from file ``test__spectral.py``. 
""" x = np.random.randn(1024) fs = 1.0 window = ('tukey', 0.25) nperseg, noverlap = 16, 2 fw, Pw = welch(x, fs, window, nperseg, noverlap) SFT = ShortTimeFFT.from_window(window, fs, nperseg, noverlap, fft_mode='onesided2X', scale_to='psd', phase_shift=None) # `welch` positions the window differently than the STFT: P = SFT.spectrogram(x, detr='constant', p0=0, p1=(len(x)-noverlap)//SFT.hop, k_offset=nperseg//2) assert_allclose(SFT.f, fw) assert_allclose(np.mean(P, axis=-1), Pw) @pytest.mark.parametrize('window, N, nperseg, noverlap, mfft', # from test_roundtrip_padded_FFT: [('hann', 1024, 256, 128, 512), ('hann', 1024, 256, 128, 501), ('boxcar', 100, 10, 0, 33), (('tukey', 0.5), 1152, 256, 64, 1024), # from test_roundtrip_padded_signal: ('boxcar', 101, 10, 0, None), ('hann', 1000, 256, 128, None), # from test_roundtrip_boundary_extension: ('boxcar', 100, 10, 0, None), ('boxcar', 100, 10, 9, None)]) @pytest.mark.parametrize('padding', get_args(PAD_TYPE)) def test_stft_padding_roundtrip(window, N: int, nperseg: int, noverlap: int, mfft: int, padding): """Test the parameter 'padding' of `stft` with roundtrips. The STFT parametrizations were taken from the methods `test_roundtrip_padded_FFT`, `test_roundtrip_padded_signal` and `test_roundtrip_boundary_extension` from class `TestSTFT` in file ``test_spectral.py``. Note that the ShortTimeFFT does not need the concept of "boundary extension". """ x = normal_distribution.rvs(size=N, random_state=2909) # real signal z = x * np.exp(1j * np.pi / 4) # complex signal SFT = ShortTimeFFT.from_window(window, 1, nperseg, noverlap, fft_mode='twosided', mfft=mfft) Sx = SFT.stft(x, padding=padding) x1 = SFT.istft(Sx, k1=N) assert_allclose(x1, x, err_msg=f"Failed real roundtrip with '{padding}' padding") Sz = SFT.stft(z, padding=padding) z1 = SFT.istft(Sz, k1=N) assert_allclose(z1, z, err_msg="Failed complex roundtrip with " + f" '{padding}' padding") @pytest.mark.parametrize('N_x', (128, 129, 255, 256, 1337)) # signal length @pytest.mark.parametrize('w_size', (128, 256)) # window length @pytest.mark.parametrize('t_step', (4, 64)) # SFT time hop @pytest.mark.parametrize('f_c', (7., 23.)) # frequency of input sine def test_energy_conservation(N_x: int, w_size: int, t_step: int, f_c: float): """Test if a `psd`-scaled STFT conserves the L2 norm. This test is adapted from MNE-Python [1]_. Besides being battle-tested, this test has the benefit of using non-standard window including non-positive values and a 2d input signal. Since `ShortTimeFFT` requires the signal length `N_x` to be at least the window length `w_size`, the parameter `N_x` was changed from ``(127, 128, 255, 256, 1337)`` to ``(128, 129, 255, 256, 1337)`` to be more useful. .. [1] File ``test_stft.py`` of MNE-Python https://github.com/mne-tools/mne-python/blob/main/mne/time_frequency/tests/test_stft.py """ window = np.sin(np.arange(.5, w_size + .5) / w_size * np.pi) SFT = ShortTimeFFT(window, t_step, fs=1000, fft_mode='onesided2X', scale_to='psd') atol = 2*np.finfo(window.dtype).resolution N_x = max(N_x, w_size) # minimal sing # Test with low frequency signal t = np.arange(N_x).astype(np.float64) x = np.sin(2 * np.pi * f_c * t * SFT.T) x = np.array([x, x + 1.]) X = SFT.stft(x) xp = SFT.istft(X, k1=N_x) max_freq = SFT.f[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))] assert X.shape[1] == SFT.f_pts assert np.all(SFT.f >= 0.) assert np.abs(max_freq - f_c) < 1. 
assert_allclose(x, xp, atol=atol) # check L2-norm squared (i.e., energy) conservation: E_x = np.sum(x**2, axis=-1) * SFT.T # numerical integration aX2 = X.real**2 + X.imag.real**2 E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f assert_allclose(E_X, E_x, atol=atol) # Test with random signal np.random.seed(2392795) x = np.random.randn(2, N_x) X = SFT.stft(x) xp = SFT.istft(X, k1=N_x) assert X.shape[1] == SFT.f_pts assert np.all(SFT.f >= 0.) assert np.abs(max_freq - f_c) < 1. assert_allclose(x, xp, atol=atol) # check L2-norm squared (i.e., energy) conservation: E_x = np.sum(x**2, axis=-1) * SFT.T # numeric integration aX2 = X.real ** 2 + X.imag.real ** 2 E_X = np.sum(np.sum(aX2, axis=-1) * SFT.delta_t, axis=-1) * SFT.delta_f assert_allclose(E_X, E_x, atol=atol) # Try with empty array x = np.zeros((0, N_x)) X = SFT.stft(x) xp = SFT.istft(X, k1=N_x) assert xp.shape == x.shape
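# --- Illustrative sketch (added for exposition, not part of the original
# test suite). It restates the energy ("Parseval") argument of
# `test_energy_conservation` in isolation: with `scale_to='psd'` and
# `fft_mode='onesided2X'` the signal energy, numerically integrated over
# time, equals the STFT energy integrated over the time-frequency plane.
# Window, hop and sine frequency are arbitrary illustrative choices; the
# hop divides the window length so that the squared-window overlap-add is
# exactly constant.
def _energy_conservation_sketch():
    w_size, hop = 128, 16
    win = np.sin(np.arange(.5, w_size + .5) / w_size * np.pi)  # sine window
    SFT = ShortTimeFFT(win, hop, fs=1000, fft_mode='onesided2X',
                       scale_to='psd')
    t = np.arange(512) * SFT.T
    x = np.sin(2 * np.pi * 50 * t)                       # 50 Hz test tone
    X = SFT.stft(x)
    E_x = np.sum(x**2) * SFT.T                           # signal energy
    E_X = np.sum(np.abs(X)**2) * SFT.delta_t * SFT.delta_f  # STFT energy
    assert np.allclose(E_X, E_x)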
33,174
39.805658
95
py
scipy
scipy-main/scipy/signal/tests/_scipy_spectral_test_shim.py
"""Helpers to utilize existing stft / istft tests for testing `ShortTimeFFT`. This module provides the functions stft_compare() and istft_compare(), which compare the output of the existing (i)stft() and the ShortTimeFFT-based _(i)stft_wrapper() implementations in this module. For testing, add the following imports to the file ``tests/test_spectral.py``:: from ._scipy_spectral_test_shim import stft_compare as stft from ._scipy_spectral_test_shim import istft_compare as istft and remove the existing imports of stft and istft. The idea of these wrappers is not to provide a backward-compatible interface but to demonstrate that the ShortTimeFFT implementation is at least as capable as the existing one and delivers comparable results. Furthermore, the wrappers highlight the different philosophies of the implementations, especially in the border handling. """ import platform from typing import cast, Literal import numpy as np from numpy.testing import assert_allclose from scipy.signal import ShortTimeFFT from scipy.signal import csd, get_window, stft, istft from scipy.signal._arraytools import const_ext, even_ext, odd_ext, zero_ext from scipy.signal._short_time_fft import FFT_MODE_TYPE from scipy.signal._spectral_py import _spectral_helper, _triage_segments, \ _median_bias def _stft_wrapper(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, detrend=False, return_onesided=True, boundary='zeros', padded=True, axis=-1, scaling='spectrum'): """Wrapper for the SciPy `stft()` function based on `ShortTimeFFT` for unit testing. Handling the boundary and padding is where `ShortTimeFFT` and `stft()` differ in behavior. Parts of `_spectral_helper()` were copied to mimic the `stft()` behavior. This function is meant to be solely used by `stft_compare()`. """ if scaling not in ('psd', 'spectrum'): # same errors as in original stft: raise ValueError(f"Parameter {scaling=} not in ['spectrum', 'psd']!") # The following lines are taken from the original _spectral_helper(): boundary_funcs = {'even': even_ext, 'odd': odd_ext, 'constant': const_ext, 'zeros': zero_ext, None: None} if boundary not in boundary_funcs: raise ValueError(f"Unknown boundary option '{boundary}', must be one" + f" of: {list(boundary_funcs.keys())}") if x.size == 0: return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape) if nperseg is not None: # if specified by user nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') # parse window; if array like, then set nperseg = win.shape win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[axis]) if nfft is None: nfft = nperseg elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg//2 else: noverlap = int(noverlap) if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') nstep = nperseg - noverlap n = x.shape[axis] # Padding occurs after boundary extension, so that the extended signal ends # in zeros, instead of introducing an impulse at the end. # I.e. 
if x = [..., 3, 2] # extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0] # pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3] if boundary is not None: ext_func = boundary_funcs[boundary] # Extend by nperseg//2 in front and back: x = ext_func(x, nperseg//2, axis=axis) if padded: # Pad to integer number of windowed segments # I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg x = np.moveaxis(x, axis, -1) # This is an edge case where shortTimeFFT returns one more time slice # than the Scipy stft() shorten to remove last time slice: if n % 2 == 1 and nperseg % 2 == 1 and noverlap % 2 == 1: x = x[..., :axis - 1] nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg zeros_shape = list(x.shape[:-1]) + [nadd] x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1) x = np.moveaxis(x, -1, axis) # ... end original _spectral_helper() code. scale_to = {'spectrum': 'magnitude', 'psd': 'psd'}[scaling] if np.iscomplexobj(x) and return_onesided: return_onesided = False # using cast() to make mypy happy: fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided') ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft, scale_to=scale_to, phase_shift=None) k_off = nperseg // 2 p0 = 0 # ST.lower_border_end[1] + 1 nn = x.shape[axis] if padded else n+k_off+1 p1 = ST.upper_border_begin(nn)[1] # ST.p_max(n) + 1 # This is bad hack to pass the test test_roundtrip_boundary_extension(): if padded is True and nperseg - noverlap == 1: p1 -= nperseg // 2 - 1 # the reasoning behind this is not clear to me detr = None if detrend is False else detrend Sxx = ST.stft_detrend(x, detr, p0, p1, k_offset=k_off, axis=axis) t = ST.t(nn, 0, p1 - p0, k_offset=0 if boundary is not None else k_off) if x.dtype in (np.float32, np.complex64): Sxx = Sxx.astype(np.complex64) # workaround for test_average_all_segments() - seems to be buggy behavior: if boundary is None and padded is False: t, Sxx = t[1:-1], Sxx[..., :-2] t -= k_off / fs return ST.f, t, Sxx def _istft_wrapper(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2, scaling='spectrum') -> \ tuple[np.ndarray, np.ndarray, tuple[int, int]]: """Wrapper for the SciPy `istft()` function based on `ShortTimeFFT` for unit testing. Note that only option handling is implemented as far as to handle the unit tests. E.g., the case ``nperseg=None`` is not handled. This function is meant to be solely used by `istft_compare()`. 
""" # *** Lines are taken from _spectral_py.istft() ***: if Zxx.ndim < 2: raise ValueError('Input stft must be at least 2d!') if freq_axis == time_axis: raise ValueError('Must specify differing time and frequency axes!') nseg = Zxx.shape[time_axis] if input_onesided: # Assume even segment length n_default = 2*(Zxx.shape[freq_axis] - 1) else: n_default = Zxx.shape[freq_axis] # Check windowing parameters if nperseg is None: nperseg = n_default else: nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') if nfft is None: if input_onesided and (nperseg == n_default + 1): # Odd nperseg, no FFT padding nfft = nperseg else: nfft = n_default elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg//2 else: noverlap = int(noverlap) if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') nstep = nperseg - noverlap # Get window as array if isinstance(window, str) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) if len(win.shape) != 1: raise ValueError('window must be 1-D') if win.shape[0] != nperseg: raise ValueError(f'window must have length of {nperseg}') outputlength = nperseg + (nseg-1)*nstep # *** End block of: Taken from _spectral_py.istft() *** # Using cast() to make mypy happy: fft_mode = cast(FFT_MODE_TYPE, 'onesided' if input_onesided else 'twosided') scale_to = cast(Literal['magnitude', 'psd'], {'spectrum': 'magnitude', 'psd': 'psd'}[scaling]) ST = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft, scale_to=scale_to, phase_shift=None) if boundary: j = nperseg if nperseg % 2 == 0 else nperseg - 1 k0 = ST.k_min + nperseg // 2 k1 = outputlength - j + k0 else: raise NotImplementedError("boundary=False does not make sense with" + "ShortTimeFFT.istft()!") x = ST.istft(Zxx, k0=k0, k1=k1, f_axis=freq_axis, t_axis=time_axis) t = np.arange(k1 - k0) * ST.T k_hi = ST.upper_border_begin(k1 - k0)[0] # using cast() to make mypy happy: return t, x, (ST.lower_border_end[0], k_hi) def _csd_wrapper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, average='mean'): """Wrapper for the `csd()` function based on `ShortTimeFFT` for unit testing. """ freqs, _, Pxy = _csd_test_shim(x, y, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis) # The following code is taken from csd(): if len(Pxy.shape) >= 2 and Pxy.size > 0: if Pxy.shape[-1] > 1: if average == 'median': # np.median must be passed real arrays for the desired result bias = _median_bias(Pxy.shape[-1]) if np.iscomplexobj(Pxy): Pxy = (np.median(np.real(Pxy), axis=-1) + 1j * np.median(np.imag(Pxy), axis=-1)) else: Pxy = np.median(Pxy, axis=-1) Pxy /= bias elif average == 'mean': Pxy = Pxy.mean(axis=-1) else: raise ValueError('average must be "median" or "mean", got %s' % (average,)) else: Pxy = np.reshape(Pxy, Pxy.shape[:-1]) return freqs, Pxy def _csd_test_shim(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1): """Compare output of _spectral_helper() and ShortTimeFFT, more precisely _spect_helper_csd() for used in csd_wrapper(). The motivation of this function is to test if the ShortTimeFFT-based wrapper `_spect_helper_csd()` returns the same values as `_spectral_helper`. This function should only be usd by csd() in (unit) testing. 
""" freqs, t, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis, mode='psd') freqs1, Pxy1 = _spect_helper_csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, return_onesided, scaling, axis) np.testing.assert_allclose(freqs1, freqs) amax_Pxy = max(np.abs(Pxy).max(), 1) if Pxy.size else 1 atol = np.finfo(Pxy.dtype).resolution * amax_Pxy # needed for large Pxy # for c_ in range(Pxy.shape[-1]): # np.testing.assert_allclose(Pxy1[:, c_], Pxy[:, c_], atol=atol) np.testing.assert_allclose(Pxy1, Pxy, atol=atol) return freqs, t, Pxy def _spect_helper_csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1): """Wrapper for replacing _spectral_helper() by using the ShortTimeFFT for use by csd(). This function should be only used by _csd_test_shim() and is only useful for testing the ShortTimeFFT implementation. """ # The following lines are taken from the original _spectral_helper(): same_data = y is x axis = int(axis) # Ensure we have np.arrays, get outdtype x = np.asarray(x) if not same_data: y = np.asarray(y) # outdtype = np.result_type(x, y, np.complex64) # else: # outdtype = np.result_type(x, np.complex64) if not same_data: # Check if we can broadcast the outer axes together xouter = list(x.shape) youter = list(y.shape) xouter.pop(axis) youter.pop(axis) try: outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape except ValueError as e: raise ValueError('x and y cannot be broadcast together.') from e if same_data: if x.size == 0: return np.empty(x.shape), np.empty(x.shape) else: if x.size == 0 or y.size == 0: outshape = outershape + (min([x.shape[axis], y.shape[axis]]),) emptyout = np.moveaxis(np.empty(outshape), -1, axis) return emptyout, emptyout if nperseg is not None: # if specified by user nperseg = int(nperseg) if nperseg < 1: raise ValueError('nperseg must be a positive integer') # parse window; if array like, then set nperseg = win.shape n = x.shape[axis] if same_data else max(x.shape[axis], y.shape[axis]) win, nperseg = _triage_segments(window, nperseg, input_length=n) if nfft is None: nfft = nperseg elif nfft < nperseg: raise ValueError('nfft must be greater than or equal to nperseg.') else: nfft = int(nfft) if noverlap is None: noverlap = nperseg // 2 else: noverlap = int(noverlap) if noverlap >= nperseg: raise ValueError('noverlap must be less than nperseg.') nstep = nperseg - noverlap if np.iscomplexobj(x) and return_onesided: return_onesided = False # using cast() to make mypy happy: fft_mode = cast(FFT_MODE_TYPE, 'onesided' if return_onesided else 'twosided') scale = {'spectrum': 'magnitude', 'density': 'psd'}[scaling] SFT = ShortTimeFFT(win, nstep, fs, fft_mode=fft_mode, mfft=nfft, scale_to=scale, phase_shift=None) # _spectral_helper() calculates X.conj()*Y instead of X*Y.conj(): Pxy = SFT.spectrogram(y, x, detr=None if detrend is False else detrend, p0=0, p1=(n-noverlap)//SFT.hop, k_offset=nperseg//2, axis=axis).conj() # Note: # 'onesided2X' scaling of ShortTimeFFT conflicts with the # scaling='spectrum' parameter, since it doubles the squared magnitude, # which in the view of the ShortTimeFFT implementation does not make sense. 
# Hence, the doubling of the square is implemented here: if return_onesided: f_axis = Pxy.ndim - 1 + axis if axis < 0 else axis Pxy = np.moveaxis(Pxy, f_axis, -1) Pxy[..., 1:-1 if SFT.mfft % 2 == 0 else None] *= 2 Pxy = np.moveaxis(Pxy, -1, f_axis) return SFT.f, Pxy def stft_compare(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None, detrend=False, return_onesided=True, boundary='zeros', padded=True, axis=-1, scaling='spectrum'): """Assert that the results from the existing `stft()` and `_stft_wrapper()` are close to each other. For comparing the STFT values an absolute tolerance of the floating point resolution was added to circumvent problems with the following tests: * For float32 the tolerances are much higher in TestSTFT.test_roundtrip_float32()). * The TestSTFT.test_roundtrip_scaling() has a high relative deviation. Interestingly this did not appear in Scipy 1.9.1 but only in the current development version. """ kw = dict(x=x, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=return_onesided, boundary=boundary, padded=padded, axis=axis, scaling=scaling) f, t, Zxx = stft(**kw) f_wrapper, t_wrapper, Zxx_wrapper = _stft_wrapper(**kw) e_msg_part = " of `stft_wrapper()` differ from `stft()`." assert_allclose(f_wrapper, f, err_msg=f"Frequencies {e_msg_part}") assert_allclose(t_wrapper, t, err_msg=f"Time slices {e_msg_part}") # Adapted tolerances to account for: atol = np.finfo(Zxx.dtype).resolution * 2 assert_allclose(Zxx_wrapper, Zxx, atol=atol, err_msg=f"STFT values {e_msg_part}") return f, t, Zxx def istft_compare(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2, scaling='spectrum'): """Assert that the results from the existing `istft()` and `_istft_wrapper()` are close to each other. Quirks: * If ``boundary=False`` the comparison is skipped, since it does not make sense with ShortTimeFFT.istft(). Only used in test TestSTFT.test_roundtrip_boundary_extension(). * If ShortTimeFFT.istft() decides the STFT is not invertible, the comparison is skipped, since istft() only emits a warning and does not return a correct result. Only used in ShortTimeFFT.test_roundtrip_not_nola(). * For comparing the signals an absolute tolerance of the floating point resolution was added to account for the low accuracy of float32 (Occurs only in TestSTFT.test_roundtrip_float32()). """ kw = dict(Zxx=Zxx, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, input_onesided=input_onesided, boundary=boundary, time_axis=time_axis, freq_axis=freq_axis, scaling=scaling) t, x = istft(**kw) if not boundary: # skip test_roundtrip_boundary_extension(): return t, x # _istft_wrapper does() not implement this case try: # if inversion fails, istft() only emits a warning: t_wrapper, x_wrapper, (k_lo, k_hi) = _istft_wrapper(**kw) except ValueError as v: # Do nothing if inversion fails: if v.args[0] == "Short-time Fourier Transform not invertible!": return t, x raise v e_msg_part = " of `istft_wrapper()` differ from `istft()`" assert_allclose(t, t_wrapper, err_msg=f"Sample times {e_msg_part}") # Adapted tolerances to account for resolution loss: atol = np.finfo(x.dtype).resolution*2 # instead of default atol = 0 rtol = 1e-7 # default for np.allclose() # Relax atol on 32-Bit platforms a bit to pass CI tests. # - Not clear why there are discrepancies (in the FFT maybe?) 
# - Not sure what changed on 'i686' since earlier on those test passed if x.dtype == np.float32 and platform.machine() == 'i686': # float32 gets only used by TestSTFT.test_roundtrip_float32() so # we are using the tolerances from there to circumvent CI problems atol, rtol = 1e-4, 1e-5 elif platform.machine() in ('aarch64', 'i386', 'i686'): atol = max(atol, 1e-12) # 2e-15 seems too tight for 32-Bit platforms assert_allclose(x_wrapper[k_lo:k_hi], x[k_lo:k_hi], atol=atol, rtol=rtol, err_msg=f"Signal values {e_msg_part}") return t, x def csd_compare(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None, detrend='constant', return_onesided=True, scaling='density', axis=-1, average='mean'): """Assert that the results from the existing `csd()` and `_csd_wrapper()` are close to each other. """ kw = dict(x=x, y=y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap, nfft=nfft, detrend=detrend, return_onesided=return_onesided, scaling=scaling, axis=axis, average=average) freqs0, Pxy0 = csd(**kw) freqs1, Pxy1 = _csd_wrapper(**kw) assert_allclose(freqs1, freqs0) assert_allclose(Pxy1, Pxy0) assert_allclose(freqs1, freqs0) return freqs0, Pxy0
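# --- Illustrative sketch (added for exposition, not part of the original
# shim module). It shows how the comparison wrappers defined above are
# meant to be driven: each call runs both the legacy and the
# ShortTimeFFT-based implementation and asserts internally that the results
# agree. Signal length and STFT parameters mirror a parametrization used in
# the existing spectral tests and are illustrative only.
def _compare_wrappers_sketch():
    rng = np.random.default_rng(2394655)
    x = rng.standard_normal(1024)
    f, t, Zxx = stft_compare(x, fs=1.0, window='hann', nperseg=256,
                             noverlap=128)
    t_r, x_r = istft_compare(Zxx, fs=1.0, window='hann', nperseg=256,
                             noverlap=128)
    # the roundtrip through the legacy stft()/istft() recovers the signal:
    assert_allclose(x_r[:x.size], x, atol=1e-10)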
20,033
39.885714
81
py
scipy
scipy-main/scipy/signal/tests/test_peak_finding.py
import copy import numpy as np from numpy.testing import ( assert_, assert_equal, assert_allclose, assert_array_equal ) import pytest from pytest import raises, warns from scipy.signal._peak_finding import ( argrelmax, argrelmin, peak_prominences, peak_widths, _unpack_condition_args, find_peaks, find_peaks_cwt, _identify_ridge_lines ) from scipy.signal.windows import gaussian from scipy.signal._peak_finding_utils import _local_maxima_1d, PeakPropertyWarning def _gen_gaussians(center_locs, sigmas, total_length): xdata = np.arange(0, total_length).astype(float) out_data = np.zeros(total_length, dtype=float) for ind, sigma in enumerate(sigmas): tmp = (xdata - center_locs[ind]) / sigma out_data += np.exp(-(tmp**2)) return out_data def _gen_gaussians_even(sigmas, total_length): num_peaks = len(sigmas) delta = total_length / (num_peaks + 1) center_locs = np.linspace(delta, total_length - delta, num=num_peaks).astype(int) out_data = _gen_gaussians(center_locs, sigmas, total_length) return out_data, center_locs def _gen_ridge_line(start_locs, max_locs, length, distances, gaps): """ Generate coordinates for a ridge line. Will be a series of coordinates, starting at start_locs (length 2). The maximum distance between any adjacent columns will be `max_distance`, the max distance between adjacent rows will be `max_gap`. `max_locs` should be the size of the intended matrix. The ending coordinates are guaranteed to be less than `max_locs`, although they may not approach `max_locs` at all. """ def keep_bounds(num, max_val): out = max(num, 0) out = min(out, max_val) return out gaps = copy.deepcopy(gaps) distances = copy.deepcopy(distances) locs = np.zeros([length, 2], dtype=int) locs[0, :] = start_locs total_length = max_locs[0] - start_locs[0] - sum(gaps) if total_length < length: raise ValueError('Cannot generate ridge line according to constraints') dist_int = length / len(distances) - 1 gap_int = length / len(gaps) - 1 for ind in range(1, length): nextcol = locs[ind - 1, 1] nextrow = locs[ind - 1, 0] + 1 if (ind % dist_int == 0) and (len(distances) > 0): nextcol += ((-1)**ind)*distances.pop() if (ind % gap_int == 0) and (len(gaps) > 0): nextrow += gaps.pop() nextrow = keep_bounds(nextrow, max_locs[0]) nextcol = keep_bounds(nextcol, max_locs[1]) locs[ind, :] = [nextrow, nextcol] return [locs[:, 0], locs[:, 1]] class TestLocalMaxima1d: def test_empty(self): """Test with empty signal.""" x = np.array([], dtype=np.float64) for array in _local_maxima_1d(x): assert_equal(array, np.array([])) assert_(array.base is None) def test_linear(self): """Test with linear signal.""" x = np.linspace(0, 100) for array in _local_maxima_1d(x): assert_equal(array, np.array([])) assert_(array.base is None) def test_simple(self): """Test with simple signal.""" x = np.linspace(-10, 10, 50) x[2::3] += 1 expected = np.arange(2, 50, 3) for array in _local_maxima_1d(x): # For plateaus of size 1, the edges are identical with the # midpoints assert_equal(array, expected) assert_(array.base is None) def test_flat_maxima(self): """Test if flat maxima are detected correctly.""" x = np.array([-1.3, 0, 1, 0, 2, 2, 0, 3, 3, 3, 2.99, 4, 4, 4, 4, -10, -5, -5, -5, -5, -5, -10]) midpoints, left_edges, right_edges = _local_maxima_1d(x) assert_equal(midpoints, np.array([2, 4, 8, 12, 18])) assert_equal(left_edges, np.array([2, 4, 7, 11, 16])) assert_equal(right_edges, np.array([2, 5, 9, 14, 20])) @pytest.mark.parametrize('x', [ np.array([1., 0, 2]), np.array([3., 3, 0, 4, 4]), np.array([5., 5, 5, 0, 6, 6, 6]), ]) def test_signal_edges(self, x): 
"""Test if behavior on signal edges is correct.""" for array in _local_maxima_1d(x): assert_equal(array, np.array([])) assert_(array.base is None) def test_exceptions(self): """Test input validation and raised exceptions.""" with raises(ValueError, match="wrong number of dimensions"): _local_maxima_1d(np.ones((1, 1))) with raises(ValueError, match="expected 'const float64_t'"): _local_maxima_1d(np.ones(1, dtype=int)) with raises(TypeError, match="list"): _local_maxima_1d([1., 2.]) with raises(TypeError, match="'x' must not be None"): _local_maxima_1d(None) class TestRidgeLines: def test_empty(self): test_matr = np.zeros([20, 100]) lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) assert_(len(lines) == 0) def test_minimal(self): test_matr = np.zeros([20, 100]) test_matr[0, 10] = 1 lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) assert_(len(lines) == 1) test_matr = np.zeros([20, 100]) test_matr[0:2, 10] = 1 lines = _identify_ridge_lines(test_matr, np.full(20, 2), 1) assert_(len(lines) == 1) def test_single_pass(self): distances = [0, 1, 2, 5] gaps = [0, 1, 2, 0, 1] test_matr = np.zeros([20, 50]) + 1e-12 length = 12 line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) test_matr[line[0], line[1]] = 1 max_distances = np.full(20, max(distances)) identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1) assert_array_equal(identified_lines, [line]) def test_single_bigdist(self): distances = [0, 1, 2, 5] gaps = [0, 1, 2, 4] test_matr = np.zeros([20, 50]) length = 12 line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) test_matr[line[0], line[1]] = 1 max_dist = 3 max_distances = np.full(20, max_dist) #This should get 2 lines, since the distance is too large identified_lines = _identify_ridge_lines(test_matr, max_distances, max(gaps) + 1) assert_(len(identified_lines) == 2) for iline in identified_lines: adists = np.diff(iline[1]) np.testing.assert_array_less(np.abs(adists), max_dist) agaps = np.diff(iline[0]) np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) def test_single_biggap(self): distances = [0, 1, 2, 5] max_gap = 3 gaps = [0, 4, 2, 1] test_matr = np.zeros([20, 50]) length = 12 line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) test_matr[line[0], line[1]] = 1 max_dist = 6 max_distances = np.full(20, max_dist) #This should get 2 lines, since the gap is too large identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) assert_(len(identified_lines) == 2) for iline in identified_lines: adists = np.diff(iline[1]) np.testing.assert_array_less(np.abs(adists), max_dist) agaps = np.diff(iline[0]) np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) def test_single_biggaps(self): distances = [0] max_gap = 1 gaps = [3, 6] test_matr = np.zeros([50, 50]) length = 30 line = _gen_ridge_line([0, 25], test_matr.shape, length, distances, gaps) test_matr[line[0], line[1]] = 1 max_dist = 1 max_distances = np.full(50, max_dist) #This should get 3 lines, since the gaps are too large identified_lines = _identify_ridge_lines(test_matr, max_distances, max_gap) assert_(len(identified_lines) == 3) for iline in identified_lines: adists = np.diff(iline[1]) np.testing.assert_array_less(np.abs(adists), max_dist) agaps = np.diff(iline[0]) np.testing.assert_array_less(np.abs(agaps), max(gaps) + 0.1) class TestArgrel: def test_empty(self): # Regression test for gh-2832. 
# When there are no relative extrema, make sure that # the number of empty arrays returned matches the # dimension of the input. empty_array = np.array([], dtype=int) z1 = np.zeros(5) i = argrelmin(z1) assert_equal(len(i), 1) assert_array_equal(i[0], empty_array) z2 = np.zeros((3,5)) row, col = argrelmin(z2, axis=0) assert_array_equal(row, empty_array) assert_array_equal(col, empty_array) row, col = argrelmin(z2, axis=1) assert_array_equal(row, empty_array) assert_array_equal(col, empty_array) def test_basic(self): # Note: the docstrings for the argrel{min,max,extrema} functions # do not give a guarantee of the order of the indices, so we'll # sort them before testing. x = np.array([[1, 2, 2, 3, 2], [2, 1, 2, 2, 3], [3, 2, 1, 2, 2], [2, 3, 2, 1, 2], [1, 2, 3, 2, 1]]) row, col = argrelmax(x, axis=0) order = np.argsort(row) assert_equal(row[order], [1, 2, 3]) assert_equal(col[order], [4, 0, 1]) row, col = argrelmax(x, axis=1) order = np.argsort(row) assert_equal(row[order], [0, 3, 4]) assert_equal(col[order], [3, 1, 2]) row, col = argrelmin(x, axis=0) order = np.argsort(row) assert_equal(row[order], [1, 2, 3]) assert_equal(col[order], [1, 2, 3]) row, col = argrelmin(x, axis=1) order = np.argsort(row) assert_equal(row[order], [1, 2, 3]) assert_equal(col[order], [1, 2, 3]) def test_highorder(self): order = 2 sigmas = [1.0, 2.0, 10.0, 5.0, 15.0] test_data, act_locs = _gen_gaussians_even(sigmas, 500) test_data[act_locs + order] = test_data[act_locs]*0.99999 test_data[act_locs - order] = test_data[act_locs]*0.99999 rel_max_locs = argrelmax(test_data, order=order, mode='clip')[0] assert_(len(rel_max_locs) == len(act_locs)) assert_((rel_max_locs == act_locs).all()) def test_2d_gaussians(self): sigmas = [1.0, 2.0, 10.0] test_data, act_locs = _gen_gaussians_even(sigmas, 100) rot_factor = 20 rot_range = np.arange(0, len(test_data)) - rot_factor test_data_2 = np.vstack([test_data, test_data[rot_range]]) rel_max_rows, rel_max_cols = argrelmax(test_data_2, axis=1, order=1) for rw in range(0, test_data_2.shape[0]): inds = (rel_max_rows == rw) assert_(len(rel_max_cols[inds]) == len(act_locs)) assert_((act_locs == (rel_max_cols[inds] - rot_factor*rw)).all()) class TestPeakProminences: def test_empty(self): """ Test if an empty array is returned if no peaks are provided. """ out = peak_prominences([1, 2, 3], []) for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): assert_(arr.size == 0) assert_(arr.dtype == dtype) out = peak_prominences([], []) for arr, dtype in zip(out, [np.float64, np.intp, np.intp]): assert_(arr.size == 0) assert_(arr.dtype == dtype) def test_basic(self): """ Test if height of prominences is correctly calculated in signal with rising baseline (peak widths are 1 sample). """ # Prepare basic signal x = np.array([-1, 1.2, 1.2, 1, 3.2, 1.3, 2.88, 2.1]) peaks = np.array([1, 2, 4, 6]) lbases = np.array([0, 0, 0, 5]) rbases = np.array([3, 3, 5, 7]) proms = x[peaks] - np.max([x[lbases], x[rbases]], axis=0) # Test if calculation matches handcrafted result out = peak_prominences(x, peaks) assert_equal(out[0], proms) assert_equal(out[1], lbases) assert_equal(out[2], rbases) def test_edge_cases(self): """ Test edge cases. 
""" # Peaks have same height, prominence and bases x = [0, 2, 1, 2, 1, 2, 0] peaks = [1, 3, 5] proms, lbases, rbases = peak_prominences(x, peaks) assert_equal(proms, [2, 2, 2]) assert_equal(lbases, [0, 0, 0]) assert_equal(rbases, [6, 6, 6]) # Peaks have same height & prominence but different bases x = [0, 1, 0, 1, 0, 1, 0] peaks = np.array([1, 3, 5]) proms, lbases, rbases = peak_prominences(x, peaks) assert_equal(proms, [1, 1, 1]) assert_equal(lbases, peaks - 1) assert_equal(rbases, peaks + 1) def test_non_contiguous(self): """ Test with non-C-contiguous input arrays. """ x = np.repeat([-9, 9, 9, 0, 3, 1], 2) peaks = np.repeat([1, 2, 4], 2) proms, lbases, rbases = peak_prominences(x[::2], peaks[::2]) assert_equal(proms, [9, 9, 2]) assert_equal(lbases, [0, 0, 3]) assert_equal(rbases, [3, 3, 5]) def test_wlen(self): """ Test if wlen actually shrinks the evaluation range correctly. """ x = [0, 1, 2, 3, 1, 0, -1] peak = [3] # Test rounding behavior of wlen assert_equal(peak_prominences(x, peak), [3., 0, 6]) for wlen, i in [(8, 0), (7, 0), (6, 0), (5, 1), (3.2, 1), (3, 2), (1.1, 2)]: assert_equal(peak_prominences(x, peak, wlen), [3. - i, 0 + i, 6 - i]) def test_exceptions(self): """ Verify that exceptions and warnings are raised. """ # x with dimension > 1 with raises(ValueError, match='1-D array'): peak_prominences([[0, 1, 1, 0]], [1, 2]) # peaks with dimension > 1 with raises(ValueError, match='1-D array'): peak_prominences([0, 1, 1, 0], [[1, 2]]) # x with dimension < 1 with raises(ValueError, match='1-D array'): peak_prominences(3, [0,]) # empty x with supplied with raises(ValueError, match='not a valid index'): peak_prominences([], [0]) # invalid indices with non-empty x for p in [-100, -1, 3, 1000]: with raises(ValueError, match='not a valid index'): peak_prominences([1, 0, 2], [p]) # peaks is not cast-able to np.intp with raises(TypeError, match='cannot safely cast'): peak_prominences([0, 1, 1, 0], [1.1, 2.3]) # wlen < 3 with raises(ValueError, match='wlen'): peak_prominences(np.arange(10), [3, 5], wlen=1) def test_warnings(self): """ Verify that appropriate warnings are raised. """ msg = "some peaks have a prominence of 0" for p in [0, 1, 2]: with warns(PeakPropertyWarning, match=msg): peak_prominences([1, 0, 2], [p,]) with warns(PeakPropertyWarning, match=msg): peak_prominences([0, 1, 1, 1, 0], [2], wlen=2) class TestPeakWidths: def test_empty(self): """ Test if an empty array is returned if no peaks are provided. """ widths = peak_widths([], [])[0] assert_(isinstance(widths, np.ndarray)) assert_equal(widths.size, 0) widths = peak_widths([1, 2, 3], [])[0] assert_(isinstance(widths, np.ndarray)) assert_equal(widths.size, 0) out = peak_widths([], []) for arr in out: assert_(isinstance(arr, np.ndarray)) assert_equal(arr.size, 0) @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") def test_basic(self): """ Test a simple use case with easy to verify results at different relative heights. """ x = np.array([1, 0, 1, 2, 1, 0, -1]) prominence = 2 for rel_height, width_true, lip_true, rip_true in [ (0., 0., 3., 3.), # raises warning (0.25, 1., 2.5, 3.5), (0.5, 2., 2., 4.), (0.75, 3., 1.5, 4.5), (1., 4., 1., 5.), (2., 5., 1., 6.), (3., 5., 1., 6.) ]: width_calc, height, lip_calc, rip_calc = peak_widths( x, [3], rel_height) assert_allclose(width_calc, width_true) assert_allclose(height, 2 - rel_height * prominence) assert_allclose(lip_calc, lip_true) assert_allclose(rip_calc, rip_true) def test_non_contiguous(self): """ Test with non-C-contiguous input arrays. 
""" x = np.repeat([0, 100, 50], 4) peaks = np.repeat([1], 3) result = peak_widths(x[::4], peaks[::3]) assert_equal(result, [0.75, 75, 0.75, 1.5]) def test_exceptions(self): """ Verify that argument validation works as intended. """ with raises(ValueError, match='1-D array'): # x with dimension > 1 peak_widths(np.zeros((3, 4)), np.ones(3)) with raises(ValueError, match='1-D array'): # x with dimension < 1 peak_widths(3, [0]) with raises(ValueError, match='1-D array'): # peaks with dimension > 1 peak_widths(np.arange(10), np.ones((3, 2), dtype=np.intp)) with raises(ValueError, match='1-D array'): # peaks with dimension < 1 peak_widths(np.arange(10), 3) with raises(ValueError, match='not a valid index'): # peak pos exceeds x.size peak_widths(np.arange(10), [8, 11]) with raises(ValueError, match='not a valid index'): # empty x with peaks supplied peak_widths([], [1, 2]) with raises(TypeError, match='cannot safely cast'): # peak cannot be safely casted to intp peak_widths(np.arange(10), [1.1, 2.3]) with raises(ValueError, match='rel_height'): # rel_height is < 0 peak_widths([0, 1, 0, 1, 0], [1, 3], rel_height=-1) with raises(TypeError, match='None'): # prominence data contains None peak_widths([1, 2, 1], [1], prominence_data=(None, None, None)) def test_warnings(self): """ Verify that appropriate warnings are raised. """ msg = "some peaks have a width of 0" with warns(PeakPropertyWarning, match=msg): # Case: rel_height is 0 peak_widths([0, 1, 0], [1], rel_height=0) with warns(PeakPropertyWarning, match=msg): # Case: prominence is 0 and bases are identical peak_widths( [0, 1, 1, 1, 0], [2], prominence_data=(np.array([0.], np.float64), np.array([2], np.intp), np.array([2], np.intp)) ) def test_mismatching_prominence_data(self): """Test with mismatching peak and / or prominence data.""" x = [0, 1, 0] peak = [1] for i, (prominences, left_bases, right_bases) in enumerate([ ((1.,), (-1,), (2,)), # left base not in x ((1.,), (0,), (3,)), # right base not in x ((1.,), (2,), (0,)), # swapped bases same as peak ((1., 1.), (0, 0), (2, 2)), # array shapes don't match peaks ((1., 1.), (0,), (2,)), # arrays with different shapes ((1.,), (0, 0), (2,)), # arrays with different shapes ((1.,), (0,), (2, 2)) # arrays with different shapes ]): # Make sure input is matches output of signal.peak_prominences prominence_data = (np.array(prominences, dtype=np.float64), np.array(left_bases, dtype=np.intp), np.array(right_bases, dtype=np.intp)) # Test for correct exception if i < 3: match = "prominence data is invalid for peak" else: match = "arrays in `prominence_data` must have the same shape" with raises(ValueError, match=match): peak_widths(x, peak, prominence_data=prominence_data) @pytest.mark.filterwarnings("ignore:some peaks have a width of 0") def test_intersection_rules(self): """Test if x == eval_height counts as an intersection.""" # Flatt peak with two possible intersection points if evaluated at 1 x = [0, 1, 2, 1, 3, 3, 3, 1, 2, 1, 0] # relative height is 0 -> width is 0 as well, raises warning assert_allclose(peak_widths(x, peaks=[5], rel_height=0), [(0.,), (3.,), (5.,), (5.,)]) # width_height == x counts as intersection -> nearest 1 is chosen assert_allclose(peak_widths(x, peaks=[5], rel_height=2/3), [(4.,), (1.,), (3.,), (7.,)]) def test_unpack_condition_args(): """ Verify parsing of condition arguments for `scipy.signal.find_peaks` function. 
""" x = np.arange(10) amin_true = x amax_true = amin_true + 10 peaks = amin_true[1::2] # Test unpacking with None or interval assert_((None, None) == _unpack_condition_args((None, None), x, peaks)) assert_((1, None) == _unpack_condition_args(1, x, peaks)) assert_((1, None) == _unpack_condition_args((1, None), x, peaks)) assert_((None, 2) == _unpack_condition_args((None, 2), x, peaks)) assert_((3., 4.5) == _unpack_condition_args((3., 4.5), x, peaks)) # Test if borders are correctly reduced with `peaks` amin_calc, amax_calc = _unpack_condition_args((amin_true, amax_true), x, peaks) assert_equal(amin_calc, amin_true[peaks]) assert_equal(amax_calc, amax_true[peaks]) # Test raises if array borders don't match x with raises(ValueError, match="array size of lower"): _unpack_condition_args(amin_true, np.arange(11), peaks) with raises(ValueError, match="array size of upper"): _unpack_condition_args((None, amin_true), np.arange(11), peaks) class TestFindPeaks: # Keys of optionally returned properties property_keys = {'peak_heights', 'left_thresholds', 'right_thresholds', 'prominences', 'left_bases', 'right_bases', 'widths', 'width_heights', 'left_ips', 'right_ips'} def test_constant(self): """ Test behavior for signal without local maxima. """ open_interval = (None, None) peaks, props = find_peaks(np.ones(10), height=open_interval, threshold=open_interval, prominence=open_interval, width=open_interval) assert_(peaks.size == 0) for key in self.property_keys: assert_(props[key].size == 0) def test_plateau_size(self): """ Test plateau size condition for peaks. """ # Prepare signal with peaks with peak_height == plateau_size plateau_sizes = np.array([1, 2, 3, 4, 8, 20, 111]) x = np.zeros(plateau_sizes.size * 2 + 1) x[1::2] = plateau_sizes repeats = np.ones(x.size, dtype=int) repeats[1::2] = x[1::2] x = np.repeat(x, repeats) # Test full output peaks, props = find_peaks(x, plateau_size=(None, None)) assert_equal(peaks, [1, 3, 7, 11, 18, 33, 100]) assert_equal(props["plateau_sizes"], plateau_sizes) assert_equal(props["left_edges"], peaks - (plateau_sizes - 1) // 2) assert_equal(props["right_edges"], peaks + plateau_sizes // 2) # Test conditions assert_equal(find_peaks(x, plateau_size=4)[0], [11, 18, 33, 100]) assert_equal(find_peaks(x, plateau_size=(None, 3.5))[0], [1, 3, 7]) assert_equal(find_peaks(x, plateau_size=(5, 50))[0], [18, 33]) def test_height_condition(self): """ Test height condition for peaks. """ x = (0., 1/3, 0., 2.5, 0, 4., 0) peaks, props = find_peaks(x, height=(None, None)) assert_equal(peaks, np.array([1, 3, 5])) assert_equal(props['peak_heights'], np.array([1/3, 2.5, 4.])) assert_equal(find_peaks(x, height=0.5)[0], np.array([3, 5])) assert_equal(find_peaks(x, height=(None, 3))[0], np.array([1, 3])) assert_equal(find_peaks(x, height=(2, 3))[0], np.array([3])) def test_threshold_condition(self): """ Test threshold condition for peaks. """ x = (0, 2, 1, 4, -1) peaks, props = find_peaks(x, threshold=(None, None)) assert_equal(peaks, np.array([1, 3])) assert_equal(props['left_thresholds'], np.array([2, 3])) assert_equal(props['right_thresholds'], np.array([1, 5])) assert_equal(find_peaks(x, threshold=2)[0], np.array([3])) assert_equal(find_peaks(x, threshold=3.5)[0], np.array([])) assert_equal(find_peaks(x, threshold=(None, 5))[0], np.array([1, 3])) assert_equal(find_peaks(x, threshold=(None, 4))[0], np.array([1])) assert_equal(find_peaks(x, threshold=(2, 4))[0], np.array([])) def test_distance_condition(self): """ Test distance condition for peaks. 
""" # Peaks of different height with constant distance 3 peaks_all = np.arange(1, 21, 3) x = np.zeros(21) x[peaks_all] += np.linspace(1, 2, peaks_all.size) # Test if peaks with "minimal" distance are still selected (distance = 3) assert_equal(find_peaks(x, distance=3)[0], peaks_all) # Select every second peak (distance > 3) peaks_subset = find_peaks(x, distance=3.0001)[0] # Test if peaks_subset is subset of peaks_all assert_( np.setdiff1d(peaks_subset, peaks_all, assume_unique=True).size == 0 ) # Test if every second peak was removed assert_equal(np.diff(peaks_subset), 6) # Test priority of peak removal x = [-2, 1, -1, 0, -3] peaks_subset = find_peaks(x, distance=10)[0] # use distance > x size assert_(peaks_subset.size == 1 and peaks_subset[0] == 1) def test_prominence_condition(self): """ Test prominence condition for peaks. """ x = np.linspace(0, 10, 100) peaks_true = np.arange(1, 99, 2) offset = np.linspace(1, 10, peaks_true.size) x[peaks_true] += offset prominences = x[peaks_true] - x[peaks_true + 1] interval = (3, 9) keep = np.nonzero( (interval[0] <= prominences) & (prominences <= interval[1])) peaks_calc, properties = find_peaks(x, prominence=interval) assert_equal(peaks_calc, peaks_true[keep]) assert_equal(properties['prominences'], prominences[keep]) assert_equal(properties['left_bases'], 0) assert_equal(properties['right_bases'], peaks_true[keep] + 1) def test_width_condition(self): """ Test width condition for peaks. """ x = np.array([1, 0, 1, 2, 1, 0, -1, 4, 0]) peaks, props = find_peaks(x, width=(None, 2), rel_height=0.75) assert_equal(peaks.size, 1) assert_equal(peaks, 7) assert_allclose(props['widths'], 1.35) assert_allclose(props['width_heights'], 1.) assert_allclose(props['left_ips'], 6.4) assert_allclose(props['right_ips'], 7.75) def test_properties(self): """ Test returned properties. """ open_interval = (None, None) x = [0, 1, 0, 2, 1.5, 0, 3, 0, 5, 9] peaks, props = find_peaks(x, height=open_interval, threshold=open_interval, prominence=open_interval, width=open_interval) assert_(len(props) == len(self.property_keys)) for key in self.property_keys: assert_(peaks.size == props[key].size) def test_raises(self): """ Test exceptions raised by function. """ with raises(ValueError, match="1-D array"): find_peaks(np.array(1)) with raises(ValueError, match="1-D array"): find_peaks(np.ones((2, 2))) with raises(ValueError, match="distance"): find_peaks(np.arange(10), distance=-1) @pytest.mark.filterwarnings("ignore:some peaks have a prominence of 0", "ignore:some peaks have a width of 0") def test_wlen_smaller_plateau(self): """ Test behavior of prominence and width calculation if the given window length is smaller than a peak's plateau size. Regression test for gh-9110. """ peaks, props = find_peaks([0, 1, 1, 1, 0], prominence=(None, None), width=(None, None), wlen=2) assert_equal(peaks, 2) assert_equal(props["prominences"], 0) assert_equal(props["widths"], 0) assert_equal(props["width_heights"], 1) for key in ("left_bases", "right_bases", "left_ips", "right_ips"): assert_equal(props[key], peaks) @pytest.mark.parametrize("kwargs", [ {}, {"distance": 3.0}, {"prominence": (None, None)}, {"width": (None, 2)}, ]) def test_readonly_array(self, kwargs): """ Test readonly arrays are accepted. 
""" x = np.linspace(0, 10, 15) x_readonly = x.copy() x_readonly.flags.writeable = False peaks, _ = find_peaks(x) peaks_readonly, _ = find_peaks(x_readonly, **kwargs) assert_allclose(peaks, peaks_readonly) class TestFindPeaksCwt: def test_find_peaks_exact(self): """ Generate a series of gaussians and attempt to find the peak locations. """ sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0] num_points = 500 test_data, act_locs = _gen_gaussians_even(sigmas, num_points) widths = np.arange(0.1, max(sigmas)) found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=0, min_length=None) np.testing.assert_array_equal(found_locs, act_locs, "Found maximum locations did not equal those expected") def test_find_peaks_withnoise(self): """ Verify that peak locations are (approximately) found for a series of gaussians with added noise. """ sigmas = [5.0, 3.0, 10.0, 20.0, 10.0, 50.0] num_points = 500 test_data, act_locs = _gen_gaussians_even(sigmas, num_points) widths = np.arange(0.1, max(sigmas)) noise_amp = 0.07 np.random.seed(18181911) test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp) found_locs = find_peaks_cwt(test_data, widths, min_length=15, gap_thresh=1, min_snr=noise_amp / 5) np.testing.assert_equal(len(found_locs), len(act_locs), 'Different number' + 'of peaks found than expected') diffs = np.abs(found_locs - act_locs) max_diffs = np.array(sigmas) / 5 np.testing.assert_array_less(diffs, max_diffs, 'Maximum location differed' + 'by more than %s' % (max_diffs)) def test_find_peaks_nopeak(self): """ Verify that no peak is found in data that's just noise. """ noise_amp = 1.0 num_points = 100 np.random.seed(181819141) test_data = (np.random.rand(num_points) - 0.5)*(2*noise_amp) widths = np.arange(10, 50) found_locs = find_peaks_cwt(test_data, widths, min_snr=5, noise_perc=30) np.testing.assert_equal(len(found_locs), 0) def test_find_peaks_with_non_default_wavelets(self): x = gaussian(200, 2) widths = np.array([1, 2, 3, 4]) a = find_peaks_cwt(x, widths, wavelet=gaussian) np.testing.assert_equal(np.array([100]), a) def test_find_peaks_window_size(self): """ Verify that window_size is passed correctly to private function and affects the result. """ sigmas = [2.0, 2.0] num_points = 1000 test_data, act_locs = _gen_gaussians_even(sigmas, num_points) widths = np.arange(0.1, max(sigmas), 0.2) noise_amp = 0.05 np.random.seed(18181911) test_data += (np.random.rand(num_points) - 0.5)*(2*noise_amp) # Possibly contrived negative region to throw off peak finding # when window_size is too large test_data[250:320] -= 1 found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, min_length=None, window_size=None) with pytest.raises(AssertionError): assert found_locs.size == act_locs.size found_locs = find_peaks_cwt(test_data, widths, gap_thresh=2, min_snr=3, min_length=None, window_size=20) assert found_locs.size == act_locs.size def test_find_peaks_with_one_width(self): """ Verify that the `width` argument in `find_peaks_cwt` can be a float """ xs = np.arange(0, np.pi, 0.05) test_data = np.sin(xs) widths = 1 found_locs = find_peaks_cwt(test_data, widths) np.testing.assert_equal(found_locs, 32)
33,667
36.914414
89
py
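A minimal sketch, assuming scipy (>= 1.1, which provides find_peaks and peak_widths) and numpy are installed, of the peak-selection and width-measurement behaviour exercised by the test file above; the signal values are illustrative only.

import numpy as np
from scipy.signal import find_peaks, peak_widths

# Small signal with three local maxima of different height.
x = np.array([0., 1., 0., 2., 1., 0., 3., 2., 1., 0.])

# Open intervals select all peaks but still populate the property dict,
# mirroring the keys checked in TestFindPeaks (peak_heights, prominences, ...).
peaks, props = find_peaks(x, height=(None, None), prominence=(None, None))

# Width at half prominence (rel_height=0.5 is the default), with
# linearly interpolated intersection points as in TestPeakWidths.
widths, width_heights, left_ips, right_ips = peak_widths(x, peaks)

print(peaks)                    # indices of the local maxima: [1 3 6]
print(props["peak_heights"])    # [1. 2. 3.]
print(widths)                   # interpolated widths in samples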
scipy
scipy-main/scipy/signal/tests/test_spectral.py
import sys import numpy as np from numpy.testing import (assert_, assert_approx_equal, assert_allclose, assert_array_equal, assert_equal, assert_array_almost_equal_nulp, suppress_warnings) import pytest from pytest import raises as assert_raises from scipy import signal from scipy.fft import fftfreq from scipy.signal import (periodogram, welch, lombscargle, coherence, spectrogram, check_COLA, check_NOLA) from scipy.signal._spectral_py import _spectral_helper # Compare ShortTimeFFT.stft() / ShortTimeFFT.istft() with stft() / istft(): from scipy.signal.tests._scipy_spectral_test_shim import stft_compare as stft from scipy.signal.tests._scipy_spectral_test_shim import istft_compare as istft from scipy.signal.tests._scipy_spectral_test_shim import csd_compare as csd class TestPeriodogram: def test_real_onesided_even(self): x = np.zeros(16) x[0] = 1 f, p = periodogram(x) assert_allclose(f, np.linspace(0, 0.5, 9)) q = np.ones(9) q[0] = 0 q[-1] /= 2.0 q /= 8 assert_allclose(p, q) def test_real_onesided_odd(self): x = np.zeros(15) x[0] = 1 f, p = periodogram(x) assert_allclose(f, np.arange(8.0)/15.0) q = np.ones(8) q[0] = 0 q *= 2.0/15.0 assert_allclose(p, q, atol=1e-15) def test_real_twosided(self): x = np.zeros(16) x[0] = 1 f, p = periodogram(x, return_onesided=False) assert_allclose(f, fftfreq(16, 1.0)) q = np.full(16, 1/16.0) q[0] = 0 assert_allclose(p, q) def test_real_spectrum(self): x = np.zeros(16) x[0] = 1 f, p = periodogram(x, scaling='spectrum') g, q = periodogram(x, scaling='density') assert_allclose(f, np.linspace(0, 0.5, 9)) assert_allclose(p, q/16.0) def test_integer_even(self): x = np.zeros(16, dtype=int) x[0] = 1 f, p = periodogram(x) assert_allclose(f, np.linspace(0, 0.5, 9)) q = np.ones(9) q[0] = 0 q[-1] /= 2.0 q /= 8 assert_allclose(p, q) def test_integer_odd(self): x = np.zeros(15, dtype=int) x[0] = 1 f, p = periodogram(x) assert_allclose(f, np.arange(8.0)/15.0) q = np.ones(8) q[0] = 0 q *= 2.0/15.0 assert_allclose(p, q, atol=1e-15) def test_integer_twosided(self): x = np.zeros(16, dtype=int) x[0] = 1 f, p = periodogram(x, return_onesided=False) assert_allclose(f, fftfreq(16, 1.0)) q = np.full(16, 1/16.0) q[0] = 0 assert_allclose(p, q) def test_complex(self): x = np.zeros(16, np.complex128) x[0] = 1.0 + 2.0j f, p = periodogram(x, return_onesided=False) assert_allclose(f, fftfreq(16, 1.0)) q = np.full(16, 5.0/16.0) q[0] = 0 assert_allclose(p, q) def test_unk_scaling(self): assert_raises(ValueError, periodogram, np.zeros(4, np.complex128), scaling='foo') @pytest.mark.skipif( sys.maxsize <= 2**32, reason="On some 32-bit tolerance issue" ) def test_nd_axis_m1(self): x = np.zeros(20, dtype=np.float64) x = x.reshape((2,1,10)) x[:,:,0] = 1.0 f, p = periodogram(x) assert_array_equal(p.shape, (2, 1, 6)) assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60) f0, p0 = periodogram(x[0,0,:]) assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60) @pytest.mark.skipif( sys.maxsize <= 2**32, reason="On some 32-bit tolerance issue" ) def test_nd_axis_0(self): x = np.zeros(20, dtype=np.float64) x = x.reshape((10,2,1)) x[0,:,:] = 1.0 f, p = periodogram(x, axis=0) assert_array_equal(p.shape, (6,2,1)) assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60) f0, p0 = periodogram(x[:,0,0]) assert_array_almost_equal_nulp(p0, p[:,1,0]) def test_window_external(self): x = np.zeros(16) x[0] = 1 f, p = periodogram(x, 10, 'hann') win = signal.get_window('hann', 16) fe, pe = periodogram(x, 10, win) assert_array_almost_equal_nulp(p, pe) assert_array_almost_equal_nulp(f, fe) win_err = 
signal.get_window('hann', 32) assert_raises(ValueError, periodogram, x, 10, win_err) # win longer than signal def test_padded_fft(self): x = np.zeros(16) x[0] = 1 f, p = periodogram(x) fp, pp = periodogram(x, nfft=32) assert_allclose(f, fp[::2]) assert_allclose(p, pp[::2]) assert_array_equal(pp.shape, (17,)) def test_empty_input(self): f, p = periodogram([]) assert_array_equal(f.shape, (0,)) assert_array_equal(p.shape, (0,)) for shape in [(0,), (3,0), (0,5,2)]: f, p = periodogram(np.empty(shape)) assert_array_equal(f.shape, shape) assert_array_equal(p.shape, shape) def test_empty_input_other_axis(self): for shape in [(3,0), (0,5,2)]: f, p = periodogram(np.empty(shape), axis=1) assert_array_equal(f.shape, shape) assert_array_equal(p.shape, shape) def test_short_nfft(self): x = np.zeros(18) x[0] = 1 f, p = periodogram(x, nfft=16) assert_allclose(f, np.linspace(0, 0.5, 9)) q = np.ones(9) q[0] = 0 q[-1] /= 2.0 q /= 8 assert_allclose(p, q) def test_nfft_is_xshape(self): x = np.zeros(16) x[0] = 1 f, p = periodogram(x, nfft=16) assert_allclose(f, np.linspace(0, 0.5, 9)) q = np.ones(9) q[0] = 0 q[-1] /= 2.0 q /= 8 assert_allclose(p, q) def test_real_onesided_even_32(self): x = np.zeros(16, 'f') x[0] = 1 f, p = periodogram(x) assert_allclose(f, np.linspace(0, 0.5, 9)) q = np.ones(9, 'f') q[0] = 0 q[-1] /= 2.0 q /= 8 assert_allclose(p, q) assert_(p.dtype == q.dtype) def test_real_onesided_odd_32(self): x = np.zeros(15, 'f') x[0] = 1 f, p = periodogram(x) assert_allclose(f, np.arange(8.0)/15.0) q = np.ones(8, 'f') q[0] = 0 q *= 2.0/15.0 assert_allclose(p, q, atol=1e-7) assert_(p.dtype == q.dtype) def test_real_twosided_32(self): x = np.zeros(16, 'f') x[0] = 1 f, p = periodogram(x, return_onesided=False) assert_allclose(f, fftfreq(16, 1.0)) q = np.full(16, 1/16.0, 'f') q[0] = 0 assert_allclose(p, q) assert_(p.dtype == q.dtype) def test_complex_32(self): x = np.zeros(16, 'F') x[0] = 1.0 + 2.0j f, p = periodogram(x, return_onesided=False) assert_allclose(f, fftfreq(16, 1.0)) q = np.full(16, 5.0/16.0, 'f') q[0] = 0 assert_allclose(p, q) assert_(p.dtype == q.dtype) def test_shorter_window_error(self): x = np.zeros(16) x[0] = 1 win = signal.get_window('hann', 10) expected_msg = ('the size of the window must be the same size ' 'of the input on the specified axis') with assert_raises(ValueError, match=expected_msg): periodogram(x, window=win) class TestWelch: def test_real_onesided_even(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8) assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, 0.11111111]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_real_onesided_odd(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=9) assert_allclose(f, np.arange(5.0)/9.0) q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, 0.17072113]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_real_twosided(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_real_spectrum(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8, scaling='spectrum') assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, 0.02083333]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_integer_onesided_even(self): x = np.zeros(16, dtype=int) x[0] = 1 x[8] 
= 1 f, p = welch(x, nperseg=8) assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, 0.11111111]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_integer_onesided_odd(self): x = np.zeros(16, dtype=int) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=9) assert_allclose(f, np.arange(5.0)/9.0) q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, 0.17072113]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_integer_twosided(self): x = np.zeros(16, dtype=int) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_complex(self): x = np.zeros(16, np.complex128) x[0] = 1.0 + 2.0j x[8] = 1.0 + 2.0j f, p = welch(x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, 0.55555556, 0.55555556, 0.55555556, 0.38194444]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_unk_scaling(self): assert_raises(ValueError, welch, np.zeros(4, np.complex128), scaling='foo', nperseg=4) def test_detrend_linear(self): x = np.arange(10, dtype=np.float64) + 0.04 f, p = welch(x, nperseg=10, detrend='linear') assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_no_detrending(self): x = np.arange(10, dtype=np.float64) + 0.04 f1, p1 = welch(x, nperseg=10, detrend=False) f2, p2 = welch(x, nperseg=10, detrend=lambda x: x) assert_allclose(f1, f2, atol=1e-15) assert_allclose(p1, p2, atol=1e-15) def test_detrend_external(self): x = np.arange(10, dtype=np.float64) + 0.04 f, p = welch(x, nperseg=10, detrend=lambda seg: signal.detrend(seg, type='l')) assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_detrend_external_nd_m1(self): x = np.arange(40, dtype=np.float64) + 0.04 x = x.reshape((2,2,10)) f, p = welch(x, nperseg=10, detrend=lambda seg: signal.detrend(seg, type='l')) assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_detrend_external_nd_0(self): x = np.arange(20, dtype=np.float64) + 0.04 x = x.reshape((2,1,10)) x = np.moveaxis(x, 2, 0) f, p = welch(x, nperseg=10, axis=0, detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_nd_axis_m1(self): x = np.arange(20, dtype=np.float64) + 0.04 x = x.reshape((2,1,10)) f, p = welch(x, nperseg=10) assert_array_equal(p.shape, (2, 1, 6)) assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) f0, p0 = welch(x[0,0,:], nperseg=10) assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) def test_nd_axis_0(self): x = np.arange(20, dtype=np.float64) + 0.04 x = x.reshape((10,2,1)) f, p = welch(x, nperseg=10, axis=0) assert_array_equal(p.shape, (6,2,1)) assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) f0, p0 = welch(x[:,0,0], nperseg=10) assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) def test_window_external(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = welch(x, 10, 'hann', nperseg=8) win = signal.get_window('hann', 8) fe, pe = welch(x, 10, win, nperseg=None) assert_array_almost_equal_nulp(p, pe) assert_array_almost_equal_nulp(f, fe) assert_array_equal(fe.shape, (5,)) # because win length used as nperseg assert_array_equal(pe.shape, (5,)) assert_raises(ValueError, welch, x, 10, win, nperseg=4) # because nperseg != win.shape[-1] win_err = signal.get_window('hann', 32) assert_raises(ValueError, welch, x, 10, win_err, nperseg=None) # 
win longer than signal def test_empty_input(self): f, p = welch([]) assert_array_equal(f.shape, (0,)) assert_array_equal(p.shape, (0,)) for shape in [(0,), (3,0), (0,5,2)]: f, p = welch(np.empty(shape)) assert_array_equal(f.shape, shape) assert_array_equal(p.shape, shape) def test_empty_input_other_axis(self): for shape in [(3,0), (0,5,2)]: f, p = welch(np.empty(shape), axis=1) assert_array_equal(f.shape, shape) assert_array_equal(p.shape, shape) def test_short_data(self): x = np.zeros(8) x[0] = 1 #for string-like window, input signal length < nperseg value gives #UserWarning, sets nperseg to x.shape[-1] with suppress_warnings() as sup: sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8") f, p = welch(x,window='hann') # default nperseg f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning assert_allclose(f, f2) assert_allclose(p, p2) assert_allclose(f1, f2) assert_allclose(p1, p2) def test_window_long_or_nd(self): assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1])) assert_raises(ValueError, welch, np.zeros(4), 1, np.arange(6).reshape((2,3))) def test_nondefault_noverlap(self): x = np.zeros(64) x[::8] = 1 f, p = welch(x, nperseg=16, noverlap=4) q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., 1./6.]) assert_allclose(p, q, atol=1e-12) def test_bad_noverlap(self): assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7) def test_nfft_too_short(self): assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4) def test_real_onesided_even_32(self): x = np.zeros(16, 'f') x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8) assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, 0.11111111], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype) def test_real_onesided_odd_32(self): x = np.zeros(16, 'f') x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=9) assert_allclose(f, np.arange(5.0)/9.0) q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, 0.17072113], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype) def test_real_twosided_32(self): x = np.zeros(16, 'f') x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype) def test_complex_32(self): x = np.zeros(16, 'F') x[0] = 1.0 + 2.0j x[8] = 1.0 + 2.0j f, p = welch(x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype, f'dtype mismatch, {p.dtype}, {q.dtype}') def test_padded_freqs(self): x = np.zeros(12) nfft = 24 f = fftfreq(nfft, 1.0)[:nfft//2+1] f[-1] *= -1 fodd, _ = welch(x, nperseg=5, nfft=nfft) feven, _ = welch(x, nperseg=6, nfft=nfft) assert_allclose(f, fodd) assert_allclose(f, feven) nfft = 25 f = fftfreq(nfft, 1.0)[:(nfft + 1)//2] fodd, _ = welch(x, nperseg=5, nfft=nfft) feven, _ = welch(x, nperseg=6, nfft=nfft) assert_allclose(f, fodd) assert_allclose(f, feven) def test_window_correction(self): A = 20 fs = 1e4 nperseg = int(fs//10) fsig = 300 ii = int(fsig*nperseg//fs) # Freq index of fsig tt = np.arange(fs)/fs x = A*np.sin(2*np.pi*fsig*tt) for 
window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']: _, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window, scaling='spectrum') freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window, scaling='density') # Check peak height at signal frequency for 'spectrum' assert_allclose(p_spec[ii], A**2/2.0) # Check integrated spectrum RMS for 'density' assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2, rtol=1e-3) def test_axis_rolling(self): np.random.seed(1234) x_flat = np.random.randn(1024) _, p_flat = welch(x_flat) for a in range(3): newshape = [1,]*3 newshape[a] = -1 x = x_flat.reshape(newshape) _, p_plus = welch(x, axis=a) # Positive axis index _, p_minus = welch(x, axis=a-x.ndim) # Negative axis index assert_equal(p_flat, p_plus.squeeze(), err_msg=a) assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim) def test_average(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = welch(x, nperseg=8, average='median') assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([.1, .05, 0., 1.54074396e-33, 0.]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_raises(ValueError, welch, x, nperseg=8, average='unrecognised-average') class TestCSD: def test_pad_shorter_x(self): x = np.zeros(8) y = np.zeros(12) f = np.linspace(0, 0.5, 7) c = np.zeros(7,dtype=np.complex128) f1, c1 = csd(x, y, nperseg=12) assert_allclose(f, f1) assert_allclose(c, c1) def test_pad_shorter_y(self): x = np.zeros(12) y = np.zeros(8) f = np.linspace(0, 0.5, 7) c = np.zeros(7,dtype=np.complex128) f1, c1 = csd(x, y, nperseg=12) assert_allclose(f, f1) assert_allclose(c, c1) def test_real_onesided_even(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8) assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, 0.11111111]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_real_onesided_odd(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=9) assert_allclose(f, np.arange(5.0)/9.0) q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, 0.17072113]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_real_twosided(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_real_spectrum(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8, scaling='spectrum') assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667, 0.02083333]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_integer_onesided_even(self): x = np.zeros(16, dtype=int) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8) assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, 0.11111111]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_integer_onesided_odd(self): x = np.zeros(16, dtype=int) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=9) assert_allclose(f, np.arange(5.0)/9.0) q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113, 0.17072113]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_integer_twosided(self): x = np.zeros(16, dtype=int) x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def 
test_complex(self): x = np.zeros(16, np.complex128) x[0] = 1.0 + 2.0j x[8] = 1.0 + 2.0j f, p = csd(x, x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556, 0.55555556, 0.55555556, 0.55555556, 0.38194444]) assert_allclose(p, q, atol=1e-7, rtol=1e-7) def test_unk_scaling(self): assert_raises(ValueError, csd, np.zeros(4, np.complex128), np.ones(4, np.complex128), scaling='foo', nperseg=4) def test_detrend_linear(self): x = np.arange(10, dtype=np.float64) + 0.04 f, p = csd(x, x, nperseg=10, detrend='linear') assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_no_detrending(self): x = np.arange(10, dtype=np.float64) + 0.04 f1, p1 = csd(x, x, nperseg=10, detrend=False) f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x) assert_allclose(f1, f2, atol=1e-15) assert_allclose(p1, p2, atol=1e-15) def test_detrend_external(self): x = np.arange(10, dtype=np.float64) + 0.04 f, p = csd(x, x, nperseg=10, detrend=lambda seg: signal.detrend(seg, type='l')) assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_detrend_external_nd_m1(self): x = np.arange(40, dtype=np.float64) + 0.04 x = x.reshape((2,2,10)) f, p = csd(x, x, nperseg=10, detrend=lambda seg: signal.detrend(seg, type='l')) assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_detrend_external_nd_0(self): x = np.arange(20, dtype=np.float64) + 0.04 x = x.reshape((2,1,10)) x = np.moveaxis(x, 2, 0) f, p = csd(x, x, nperseg=10, axis=0, detrend=lambda seg: signal.detrend(seg, axis=0, type='l')) assert_allclose(p, np.zeros_like(p), atol=1e-15) def test_nd_axis_m1(self): x = np.arange(20, dtype=np.float64) + 0.04 x = x.reshape((2,1,10)) f, p = csd(x, x, nperseg=10) assert_array_equal(p.shape, (2, 1, 6)) assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13) f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10) assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13) def test_nd_axis_0(self): x = np.arange(20, dtype=np.float64) + 0.04 x = x.reshape((10,2,1)) f, p = csd(x, x, nperseg=10, axis=0) assert_array_equal(p.shape, (6,2,1)) assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13) f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10) assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13) def test_window_external(self): x = np.zeros(16) x[0] = 1 x[8] = 1 f, p = csd(x, x, 10, 'hann', 8) win = signal.get_window('hann', 8) fe, pe = csd(x, x, 10, win, nperseg=None) assert_array_almost_equal_nulp(p, pe) assert_array_almost_equal_nulp(f, fe) assert_array_equal(fe.shape, (5,)) # because win length used as nperseg assert_array_equal(pe.shape, (5,)) assert_raises(ValueError, csd, x, x, 10, win, nperseg=256) # because nperseg != win.shape[-1] win_err = signal.get_window('hann', 32) assert_raises(ValueError, csd, x, x, 10, win_err, nperseg=None) # because win longer than signal def test_empty_input(self): f, p = csd([],np.zeros(10)) assert_array_equal(f.shape, (0,)) assert_array_equal(p.shape, (0,)) f, p = csd(np.zeros(10),[]) assert_array_equal(f.shape, (0,)) assert_array_equal(p.shape, (0,)) for shape in [(0,), (3,0), (0,5,2)]: f, p = csd(np.empty(shape), np.empty(shape)) assert_array_equal(f.shape, shape) assert_array_equal(p.shape, shape) f, p = csd(np.ones(10), np.empty((5,0))) assert_array_equal(f.shape, (5,0)) assert_array_equal(p.shape, (5,0)) f, p = csd(np.empty((5,0)), np.ones(10)) assert_array_equal(f.shape, (5,0)) assert_array_equal(p.shape, (5,0)) def test_empty_input_other_axis(self): for shape in [(3,0), (0,5,2)]: f, p = csd(np.empty(shape), 
np.empty(shape), axis=1) assert_array_equal(f.shape, shape) assert_array_equal(p.shape, shape) f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1) assert_array_equal(f.shape, (10,0,3)) assert_array_equal(p.shape, (10,0,3)) f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1) assert_array_equal(f.shape, (10,0,3)) assert_array_equal(p.shape, (10,0,3)) def test_short_data(self): x = np.zeros(8) x[0] = 1 #for string-like window, input signal length < nperseg value gives #UserWarning, sets nperseg to x.shape[-1] with suppress_warnings() as sup: sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8") f, p = csd(x, x, window='hann') # default nperseg f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning assert_allclose(f, f2) assert_allclose(p, p2) assert_allclose(f1, f2) assert_allclose(p1, p2) def test_window_long_or_nd(self): assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, np.array([1,1,1,1,1])) assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, np.arange(6).reshape((2,3))) def test_nondefault_noverlap(self): x = np.zeros(64) x[::8] = 1 f, p = csd(x, x, nperseg=16, noverlap=4) q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5., 1./6.]) assert_allclose(p, q, atol=1e-12) def test_bad_noverlap(self): assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann', 2, 7) def test_nfft_too_short(self): assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3, nperseg=4) def test_real_onesided_even_32(self): x = np.zeros(16, 'f') x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8) assert_allclose(f, np.linspace(0, 0.5, 5)) q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222, 0.11111111], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype) def test_real_onesided_odd_32(self): x = np.zeros(16, 'f') x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=9) assert_allclose(f, np.arange(5.0)/9.0) q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116, 0.17072113], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype) def test_real_twosided_32(self): x = np.zeros(16, 'f') x[0] = 1 x[8] = 1 f, p = csd(x, x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07638889], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype) def test_complex_32(self): x = np.zeros(16, 'F') x[0] = 1.0 + 2.0j x[8] = 1.0 + 2.0j f, p = csd(x, x, nperseg=8, return_onesided=False) assert_allclose(f, fftfreq(8, 1.0)) q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552, 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f') assert_allclose(p, q, atol=1e-7, rtol=1e-7) assert_(p.dtype == q.dtype, f'dtype mismatch, {p.dtype}, {q.dtype}') def test_padded_freqs(self): x = np.zeros(12) y = np.ones(12) nfft = 24 f = fftfreq(nfft, 1.0)[:nfft//2+1] f[-1] *= -1 fodd, _ = csd(x, y, nperseg=5, nfft=nfft) feven, _ = csd(x, y, nperseg=6, nfft=nfft) assert_allclose(f, fodd) assert_allclose(f, feven) nfft = 25 f = fftfreq(nfft, 1.0)[:(nfft + 1)//2] fodd, _ = csd(x, y, nperseg=5, nfft=nfft) feven, _ = csd(x, y, nperseg=6, nfft=nfft) assert_allclose(f, fodd) assert_allclose(f, feven) def test_copied_data(self): x = np.random.randn(64) y = x.copy() _, p_same = csd(x, x, nperseg=8, average='mean', return_onesided=False) _, p_copied = csd(x, y, nperseg=8, average='mean', 
return_onesided=False) assert_allclose(p_same, p_copied) _, p_same = csd(x, x, nperseg=8, average='median', return_onesided=False) _, p_copied = csd(x, y, nperseg=8, average='median', return_onesided=False) assert_allclose(p_same, p_copied) class TestCoherence: def test_identical_input(self): x = np.random.randn(20) y = np.copy(x) # So `y is x` -> False f = np.linspace(0, 0.5, 6) C = np.ones(6) f1, C1 = coherence(x, y, nperseg=10) assert_allclose(f, f1) assert_allclose(C, C1) def test_phase_shifted_input(self): x = np.random.randn(20) y = -x f = np.linspace(0, 0.5, 6) C = np.ones(6) f1, C1 = coherence(x, y, nperseg=10) assert_allclose(f, f1) assert_allclose(C, C1) class TestSpectrogram: def test_average_all_segments(self): x = np.random.randn(1024) fs = 1.0 window = ('tukey', 0.25) nperseg = 16 noverlap = 2 f, _, P = spectrogram(x, fs, window, nperseg, noverlap) fw, Pw = welch(x, fs, window, nperseg, noverlap) assert_allclose(f, fw) assert_allclose(np.mean(P, axis=-1), Pw) def test_window_external(self): x = np.random.randn(1024) fs = 1.0 window = ('tukey', 0.25) nperseg = 16 noverlap = 2 f, _, P = spectrogram(x, fs, window, nperseg, noverlap) win = signal.get_window(('tukey', 0.25), 16) fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2) assert_array_equal(fe.shape, (9,)) # because win length used as nperseg assert_array_equal(Pe.shape, (9,73)) assert_raises(ValueError, spectrogram, x, fs, win, nperseg=8) # because nperseg != win.shape[-1] win_err = signal.get_window(('tukey', 0.25), 2048) assert_raises(ValueError, spectrogram, x, fs, win_err, nperseg=None) # win longer than signal def test_short_data(self): x = np.random.randn(1024) fs = 1.0 #for string-like window, input signal length < nperseg value gives #UserWarning, sets nperseg to x.shape[-1] f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg with suppress_warnings() as sup: sup.filter(UserWarning, "nperseg = 1025 is greater than input length = 1024, using nperseg = 1024") f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25), nperseg=1025) # user-specified nperseg f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd assert_allclose(f, f2) assert_allclose(p, p2) assert_allclose(f1, f3) assert_allclose(p1, p3) class TestLombscargle: def test_frequency(self): """Test if frequency location of peak corresponds to frequency of generated input signal. """ # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps np.random.seed(2353425) r = np.random.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times x = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram P = lombscargle(t, x, f) # Check if difference between found frequency maximum and input # frequency is less than accuracy delta = f[1] - f[0] assert_(w - f[np.argmax(P)] < (delta/2.)) def test_amplitude(self): # Test if height of peak in normalized Lomb-Scargle periodogram # corresponds to amplitude of the generated input signal. # Input parameters ampl = 2. w = 1. 
phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps np.random.seed(2353425) r = np.random.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times x = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, x, f) # Normalize pgram = np.sqrt(4 * pgram / t.shape[0]) # Check if difference between found frequency maximum and input # frequency is less than accuracy assert_approx_equal(np.max(pgram), ampl, significant=2) def test_precenter(self): # Test if precenter gives the same result as manually precentering. # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select offset = 0.15 # Offset to be subtracted in pre-centering # Randomly select a fraction of an array with timesteps np.random.seed(2353425) r = np.random.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times x = ampl * np.sin(w*t + phi) + offset # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, x, f, precenter=True) pgram2 = lombscargle(t, x - x.mean(), f, precenter=False) # check if centering worked assert_allclose(pgram, pgram2) def test_normalize(self): # Test normalize option of Lomb-Scarge. # Input parameters ampl = 2. w = 1. phi = 0.5 * np.pi nin = 100 nout = 1000 p = 0.7 # Fraction of points to select # Randomly select a fraction of an array with timesteps np.random.seed(2353425) r = np.random.rand(nin) t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p] # Plot a sine wave for the selected times x = ampl * np.sin(w*t + phi) # Define the array of frequencies for which to compute the periodogram f = np.linspace(0.01, 10., nout) # Calculate Lomb-Scargle periodogram pgram = lombscargle(t, x, f) pgram2 = lombscargle(t, x, f, normalize=True) # check if normalization works as expected assert_allclose(pgram * 2 / np.dot(x, x), pgram2) assert_approx_equal(np.max(pgram2), 1.0, significant=2) def test_wrong_shape(self): t = np.linspace(0, 1, 1) x = np.linspace(0, 1, 2) f = np.linspace(0, 1, 3) assert_raises(ValueError, lombscargle, t, x, f) def test_zero_division(self): t = np.zeros(1) x = np.zeros(1) f = np.zeros(1) assert_raises(ZeroDivisionError, lombscargle, t, x, f) def test_lombscargle_atan_vs_atan2(self): # https://github.com/scipy/scipy/issues/3787 # This raised a ZeroDivisionError. t = np.linspace(0, 10, 1000, endpoint=False) x = np.sin(4*t) f = np.linspace(0, 50, 500, endpoint=False) + 0.1 lombscargle(t, x, f*2*np.pi) class TestSTFT: def test_input_validation(self): def chk_VE(match): """Assert for a ValueError matching regexp `match`. This little wrapper allows a more concise code layout. 
""" return pytest.raises(ValueError, match=match) # Checks for check_COLA(): with chk_VE('nperseg must be a positive integer'): check_COLA('hann', -10, 0) with chk_VE('noverlap must be less than nperseg.'): check_COLA('hann', 10, 20) with chk_VE('window must be 1-D'): check_COLA(np.ones((2, 2)), 10, 0) with chk_VE('window must have length of nperseg'): check_COLA(np.ones(20), 10, 0) # Checks for check_NOLA(): with chk_VE('nperseg must be a positive integer'): check_NOLA('hann', -10, 0) with chk_VE('noverlap must be less than nperseg'): check_NOLA('hann', 10, 20) with chk_VE('window must be 1-D'): check_NOLA(np.ones((2, 2)), 10, 0) with chk_VE('window must have length of nperseg'): check_NOLA(np.ones(20), 10, 0) with chk_VE('noverlap must be a nonnegative integer'): check_NOLA('hann', 64, -32) x = np.zeros(1024) z = stft(x)[2] # Checks for stft(): with chk_VE('window must be 1-D'): stft(x, window=np.ones((2, 2))) with chk_VE('value specified for nperseg is different ' + 'from length of window'): stft(x, window=np.ones(10), nperseg=256) with chk_VE('nperseg must be a positive integer'): stft(x, nperseg=-256) with chk_VE('noverlap must be less than nperseg.'): stft(x, nperseg=256, noverlap=1024) with chk_VE('nfft must be greater than or equal to nperseg.'): stft(x, nperseg=256, nfft=8) # Checks for istft(): with chk_VE('Input stft must be at least 2d!'): istft(x) with chk_VE('window must be 1-D'): istft(z, window=np.ones((2, 2))) with chk_VE('window must have length of 256'): istft(z, window=np.ones(10), nperseg=256) with chk_VE('nperseg must be a positive integer'): istft(z, nperseg=-256) with chk_VE('noverlap must be less than nperseg.'): istft(z, nperseg=256, noverlap=1024) with chk_VE('nfft must be greater than or equal to nperseg.'): istft(z, nperseg=256, nfft=8) with pytest.warns(UserWarning, match="NOLA condition failed, " + "STFT may not be invertible"): istft(z, nperseg=256, noverlap=0, window='hann') with chk_VE('Must specify differing time and frequency axes!'): istft(z, time_axis=0, freq_axis=0) # Checks for _spectral_helper(): with chk_VE("Unknown value for mode foo, must be one of: " + r"\{'psd', 'stft'\}"): _spectral_helper(x, x, mode='foo') with chk_VE("x and y must be equal if mode is 'stft'"): _spectral_helper(x[:512], x[512:], mode='stft') with chk_VE("Unknown boundary option 'foo', must be one of: " + r"\['even', 'odd', 'constant', 'zeros', None\]"): _spectral_helper(x, x, boundary='foo') scaling = "not_valid" with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"): stft(x, scaling=scaling) with chk_VE(fr"Parameter {scaling=} not in \['spectrum', 'psd'\]!"): istft(z, scaling=scaling) def test_check_COLA(self): settings = [ ('boxcar', 10, 0), ('boxcar', 10, 9), ('bartlett', 51, 26), ('hann', 256, 128), ('hann', 256, 192), ('blackman', 300, 200), (('tukey', 0.5), 256, 64), ('hann', 256, 255), ] for setting in settings: msg = '{}, {}, {}'.format(*setting) assert_equal(True, check_COLA(*setting), err_msg=msg) def test_check_NOLA(self): settings_pass = [ ('boxcar', 10, 0), ('boxcar', 10, 9), ('boxcar', 10, 7), ('bartlett', 51, 26), ('bartlett', 51, 10), ('hann', 256, 128), ('hann', 256, 192), ('hann', 256, 37), ('blackman', 300, 200), ('blackman', 300, 123), (('tukey', 0.5), 256, 64), (('tukey', 0.5), 256, 38), ('hann', 256, 255), ('hann', 256, 39), ] for setting in settings_pass: msg = '{}, {}, {}'.format(*setting) assert_equal(True, check_NOLA(*setting), err_msg=msg) w_fail = np.ones(16) w_fail[::2] = 0 settings_fail = [ (w_fail, len(w_fail), len(w_fail) // 2), 
('hann', 64, 0), ] for setting in settings_fail: msg = '{}, {}, {}'.format(*setting) assert_equal(False, check_NOLA(*setting), err_msg=msg) def test_average_all_segments(self): np.random.seed(1234) x = np.random.randn(1024) fs = 1.0 window = 'hann' nperseg = 16 noverlap = 8 # Compare twosided, because onesided welch doubles non-DC terms to # account for power at negative frequencies. stft doesn't do this, # because it breaks invertibility. f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False, return_onesided=False, boundary=None) fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False, scaling='spectrum', detrend=False) assert_allclose(f, fw) assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw) def test_permute_axes(self): np.random.seed(1234) x = np.random.randn(1024) fs = 1.0 window = 'hann' nperseg = 16 noverlap = 8 f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap) f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap, axis=0) t3, x1 = istft(Z1, fs, window, nperseg, noverlap) t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0, freq_axis=-1) assert_allclose(f1, f2) assert_allclose(t1, t2) assert_allclose(t3, t4) assert_allclose(Z1, Z2[:, 0, 0, :]) assert_allclose(x1, x2[:, 0, 0]) @pytest.mark.parametrize('scaling', ['spectrum', 'psd']) def test_roundtrip_real(self, scaling): np.random.seed(1234) settings = [ ('boxcar', 100, 10, 0), # Test no overlap ('boxcar', 100, 10, 9), # Test high overlap ('bartlett', 101, 51, 26), # Test odd nperseg ('hann', 1024, 256, 128), # Test defaults (('tukey', 0.5), 1152, 256, 64), # Test Tukey ('hann', 1024, 256, 255), # Test overlapped hann ] for window, N, nperseg, noverlap in settings: t = np.arange(N) x = 10*np.random.randn(t.size) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=False, scaling=scaling) tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, window=window, scaling=scaling) msg = f'{window}, {noverlap}' assert_allclose(t, tr, err_msg=msg) assert_allclose(x, xr, err_msg=msg) def test_roundtrip_not_nola(self): np.random.seed(1234) w_fail = np.ones(16) w_fail[::2] = 0 settings = [ (w_fail, 256, len(w_fail), len(w_fail) // 2), ('hann', 256, 64, 0), ] for window, N, nperseg, noverlap in settings: msg = f'{window}, {N}, {nperseg}, {noverlap}' assert not check_NOLA(window, nperseg, noverlap), msg t = np.arange(N) x = 10 * np.random.randn(t.size) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=True, boundary='zeros') with pytest.warns(UserWarning, match='NOLA'): tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, window=window, boundary=True) assert np.allclose(t, tr[:len(t)]), msg assert not np.allclose(x, xr[:len(x)]), msg def test_roundtrip_nola_not_cola(self): np.random.seed(1234) settings = [ ('boxcar', 100, 10, 3), # NOLA True, COLA False ('bartlett', 101, 51, 37), # NOLA True, COLA False ('hann', 1024, 256, 127), # NOLA True, COLA False (('tukey', 0.5), 1152, 256, 14), # NOLA True, COLA False ('hann', 1024, 256, 5), # NOLA True, COLA False ] for window, N, nperseg, noverlap in settings: msg = f'{window}, {nperseg}, {noverlap}' assert check_NOLA(window, nperseg, noverlap), msg assert not check_COLA(window, nperseg, noverlap), msg t = np.arange(N) x = 10 * np.random.randn(t.size) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=True, boundary='zeros') tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, window=window, boundary=True) msg = f'{window}, 
{noverlap}' assert_allclose(t, tr[:len(t)], err_msg=msg) assert_allclose(x, xr[:len(x)], err_msg=msg) def test_roundtrip_float32(self): np.random.seed(1234) settings = [('hann', 1024, 256, 128)] for window, N, nperseg, noverlap in settings: t = np.arange(N) x = 10*np.random.randn(t.size) x = x.astype(np.float32) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=False) tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, window=window) msg = f'{window}, {noverlap}' assert_allclose(t, t, err_msg=msg) assert_allclose(x, xr, err_msg=msg, rtol=1e-4, atol=1e-5) assert_(x.dtype == xr.dtype) @pytest.mark.parametrize('scaling', ['spectrum', 'psd']) def test_roundtrip_complex(self, scaling): np.random.seed(1234) settings = [ ('boxcar', 100, 10, 0), # Test no overlap ('boxcar', 100, 10, 9), # Test high overlap ('bartlett', 101, 51, 26), # Test odd nperseg ('hann', 1024, 256, 128), # Test defaults (('tukey', 0.5), 1152, 256, 64), # Test Tukey ('hann', 1024, 256, 255), # Test overlapped hann ] for window, N, nperseg, noverlap in settings: t = np.arange(N) x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=False, return_onesided=False, scaling=scaling) tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, window=window, input_onesided=False, scaling=scaling) msg = f'{window}, {nperseg}, {noverlap}' assert_allclose(t, tr, err_msg=msg) assert_allclose(x, xr, err_msg=msg) # Check that asking for onesided switches to twosided with suppress_warnings() as sup: sup.filter(UserWarning, "Input data is complex, switching to return_onesided=False") _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=False, return_onesided=True, scaling=scaling) tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap, window=window, input_onesided=False, scaling=scaling) msg = f'{window}, {nperseg}, {noverlap}' assert_allclose(t, tr, err_msg=msg) assert_allclose(x, xr, err_msg=msg) def test_roundtrip_boundary_extension(self): np.random.seed(1234) # Test against boxcar, since window is all ones, and thus can be fully # recovered with no boundary extension settings = [ ('boxcar', 100, 10, 0), # Test no overlap ('boxcar', 100, 10, 9), # Test high overlap ] for window, N, nperseg, noverlap in settings: t = np.arange(N) x = 10*np.random.randn(t.size) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=True, boundary=None) _, xr = istft(zz, noverlap=noverlap, window=window, boundary=False) for boundary in ['even', 'odd', 'constant', 'zeros']: _, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=True, boundary=boundary) _, xr_ext = istft(zz_ext, noverlap=noverlap, window=window, boundary=True) msg = f'{window}, {noverlap}, {boundary}' assert_allclose(x, xr, err_msg=msg) assert_allclose(x, xr_ext, err_msg=msg) def test_roundtrip_padded_signal(self): np.random.seed(1234) settings = [ ('boxcar', 101, 10, 0), ('hann', 1000, 256, 128), ] for window, N, nperseg, noverlap in settings: t = np.arange(N) x = 10*np.random.randn(t.size) _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap, window=window, detrend=None, padded=True) tr, xr = istft(zz, noverlap=noverlap, window=window) msg = f'{window}, {noverlap}' # Account for possible zero-padding at the end assert_allclose(t, tr[:t.size], err_msg=msg) assert_allclose(x, xr[:x.size], err_msg=msg) def test_roundtrip_padded_FFT(self): 
np.random.seed(1234) settings = [ ('hann', 1024, 256, 128, 512), ('hann', 1024, 256, 128, 501), ('boxcar', 100, 10, 0, 33), (('tukey', 0.5), 1152, 256, 64, 1024), ] for window, N, nperseg, noverlap, nfft in settings: t = np.arange(N) x = 10*np.random.randn(t.size) xc = x*np.exp(1j*np.pi/4) # real signal _, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft, window=window, detrend=None, padded=True) # complex signal _, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, window=window, detrend=None, padded=True, return_onesided=False) tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft, window=window) tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft, window=window, input_onesided=False) msg = f'{window}, {noverlap}' assert_allclose(t, tr, err_msg=msg) assert_allclose(x, xr, err_msg=msg) assert_allclose(xc, xcr, err_msg=msg) def test_axis_rolling(self): np.random.seed(1234) x_flat = np.random.randn(1024) _, _, z_flat = stft(x_flat) for a in range(3): newshape = [1,]*3 newshape[a] = -1 x = x_flat.reshape(newshape) _, _, z_plus = stft(x, axis=a) # Positive axis index _, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index assert_equal(z_flat, z_plus.squeeze(), err_msg=a) assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim) # z_flat has shape [n_freq, n_time] # Test vs. transpose _, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1) _, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1) assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus') assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus') def test_roundtrip_scaling(self): """Verify behavior of scaling parameter. """ # Create 1024 sample cosine signal with amplitude 2: X = np.zeros(513, dtype=complex) X[256] = 1024 x = np.fft.irfft(X) power_x = sum(x**2) / len(x) # power of signal x is 2 # Calculate magnitude-scaled STFT: Zs = stft(x, boundary='even', scaling='spectrum')[2] # Test round trip: x1 = istft(Zs, boundary=True, scaling='spectrum')[1] assert_allclose(x1, x) # For a Hann-windowed 256 sample length FFT, we expect a peak at # frequency 64 (since it is 1/4 the length of X) with a height of 1 # (half the amplitude). A Hann window of a perfectly centered sine has # the magnitude [..., 0, 0, 0.5, 1, 0.5, 0, 0, ...]. # Note that in this case the 'even' padding works for the beginning # but not for the end of the STFT. assert_allclose(abs(Zs[63, :-1]), 0.5) assert_allclose(abs(Zs[64, :-1]), 1) assert_allclose(abs(Zs[65, :-1]), 0.5) # All other values should be zero: Zs[63:66, :-1] = 0 # Note since 'rtol' does not have influence here, atol needs to be set: assert_allclose(Zs[:, :-1], 0, atol=np.finfo(Zs.dtype).resolution) # Calculate two-sided psd-scaled STFT: # - using 'even' padding since signal is axis symmetric - this ensures # stationary behavior on the boundaries # - using the two-sided transform allows determining the spectral # power by `sum(abs(Zp[:, k])**2) / len(f)` for the k-th time slot. 
Zp = stft(x, return_onesided=False, boundary='even', scaling='psd')[2] # Calculate spectral power of Zd by summing over the frequency axis: psd_Zp = np.sum(Zp.real**2 + Zp.imag**2, axis=0) / Zp.shape[0] # Spectral power of Zp should be equal to the signal's power: assert_allclose(psd_Zp, power_x) # Test round trip: x1 = istft(Zp, input_onesided=False, boundary=True, scaling='psd')[1] assert_allclose(x1, x) # The power of the one-sided psd-scaled STFT can be determined # analogously (note that the two sides are not of equal shape): Zp0 = stft(x, return_onesided=True, boundary='even', scaling='psd')[2] # Since x is real, its Fourier transform is conjugate symmetric, i.e., # the missing 'second side' can be expressed through the 'first side': Zp1 = np.conj(Zp0[-2:0:-1, :]) # 'second side' is conjugate reversed assert_allclose(Zp[:129, :], Zp0) assert_allclose(Zp[129:, :], Zp1) # Calculate the spectral power: s2 = (np.sum(Zp0.real ** 2 + Zp0.imag ** 2, axis=0) + np.sum(Zp1.real ** 2 + Zp1.imag ** 2, axis=0)) psd_Zp01 = s2 / (Zp0.shape[0] + Zp1.shape[0]) assert_allclose(psd_Zp01, power_x) # Test round trip: x1 = istft(Zp0, input_onesided=True, boundary=True, scaling='psd')[1] assert_allclose(x1, x)
59,640
35.860939
105
py
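A minimal sketch, assuming scipy and numpy are installed, of the Welch scaling relations that TestWelch.test_window_correction relies on: for a pure sine of amplitude A falling on an exact frequency bin, the 'spectrum' estimate peaks at A**2/2, and integrating the 'density' estimate over frequency recovers the same total power. Signal parameters are illustrative.

import numpy as np
from scipy.signal import welch
from scipy.integrate import trapezoid

A, fs, fsig = 2.0, 1e4, 300.0
t = np.arange(int(fs)) / fs
x = A * np.sin(2 * np.pi * fsig * t)

nperseg = int(fs // 10)                      # 10 Hz bin spacing, so 300 Hz is an exact bin
f_spec, p_spec = welch(x, fs=fs, nperseg=nperseg, scaling='spectrum')
f_dens, p_dens = welch(x, fs=fs, nperseg=nperseg, scaling='density')

ii = int(fsig * nperseg // fs)               # frequency index of fsig
print(p_spec[ii])                            # ~ 2.0, i.e. A**2 / 2
print(trapezoid(p_dens, f_dens))             # ~ 2.0 as well (integrated PSD)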
scipy
scipy-main/scipy/signal/tests/test_wavelets.py
import numpy as np from numpy.testing import assert_equal, \ assert_array_equal, assert_array_almost_equal, assert_array_less, assert_ import scipy.signal._wavelets as wavelets class TestWavelets: def test_qmf(self): assert_array_equal(wavelets.qmf([1, 1]), [1, -1]) def test_daub(self): for i in range(1, 15): assert_equal(len(wavelets.daub(i)), i * 2) def test_cascade(self): for J in range(1, 7): for i in range(1, 5): lpcoef = wavelets.daub(i) k = len(lpcoef) x, phi, psi = wavelets.cascade(lpcoef, J) assert_(len(x) == len(phi) == len(psi)) assert_equal(len(x), (k - 1) * 2 ** J) def test_morlet(self): x = wavelets.morlet(50, 4.1, complete=True) y = wavelets.morlet(50, 4.1, complete=False) # Test if complete and incomplete wavelet have same lengths: assert_equal(len(x), len(y)) # Test if complete wavelet is less than incomplete wavelet: assert_array_less(x, y) x = wavelets.morlet(10, 50, complete=False) y = wavelets.morlet(10, 50, complete=True) # For large widths complete and incomplete wavelets should be # identical within numerical precision: assert_equal(x, y) # miscellaneous tests: x = np.array([1.73752399e-09 + 9.84327394e-25j, 6.49471756e-01 + 0.00000000e+00j, 1.73752399e-09 - 9.84327394e-25j]) y = wavelets.morlet(3, w=2, complete=True) assert_array_almost_equal(x, y) x = np.array([2.00947715e-09 + 9.84327394e-25j, 7.51125544e-01 + 0.00000000e+00j, 2.00947715e-09 - 9.84327394e-25j]) y = wavelets.morlet(3, w=2, complete=False) assert_array_almost_equal(x, y, decimal=2) x = wavelets.morlet(10000, s=4, complete=True) y = wavelets.morlet(20000, s=8, complete=True)[5000:15000] assert_array_almost_equal(x, y, decimal=2) x = wavelets.morlet(10000, s=4, complete=False) assert_array_almost_equal(y, x, decimal=2) y = wavelets.morlet(20000, s=8, complete=False)[5000:15000] assert_array_almost_equal(x, y, decimal=2) x = wavelets.morlet(10000, w=3, s=5, complete=True) y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000] assert_array_almost_equal(x, y, decimal=2) x = wavelets.morlet(10000, w=3, s=5, complete=False) assert_array_almost_equal(y, x, decimal=2) y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000] assert_array_almost_equal(x, y, decimal=2) x = wavelets.morlet(10000, w=7, s=10, complete=True) y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000] assert_array_almost_equal(x, y, decimal=2) x = wavelets.morlet(10000, w=7, s=10, complete=False) assert_array_almost_equal(x, y, decimal=2) y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000] assert_array_almost_equal(x, y, decimal=2) def test_morlet2(self): w = wavelets.morlet2(1.0, 0.5) expected = (np.pi**(-0.25) * np.sqrt(1/0.5)).astype(complex) assert_array_equal(w, expected) lengths = [5, 11, 15, 51, 101] for length in lengths: w = wavelets.morlet2(length, 1.0) assert_(len(w) == length) max_loc = np.argmax(w) assert_(max_loc == (length // 2)) points = 100 w = abs(wavelets.morlet2(points, 2.0)) half_vec = np.arange(0, points // 2) assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) x = np.array([5.03701224e-09 + 2.46742437e-24j, 1.88279253e+00 + 0.00000000e+00j, 5.03701224e-09 - 2.46742437e-24j]) y = wavelets.morlet2(3, s=1/(2*np.pi), w=2) assert_array_almost_equal(x, y) def test_ricker(self): w = wavelets.ricker(1.0, 1) expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25)) assert_array_equal(w, expected) lengths = [5, 11, 15, 51, 101] for length in lengths: w = wavelets.ricker(length, 1.0) assert_(len(w) == length) max_loc = np.argmax(w) assert_(max_loc == (length // 2)) points = 100 w = 
wavelets.ricker(points, 2.0) half_vec = np.arange(0, points // 2) #Wavelet should be symmetric assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) #Check zeros aas = [5, 10, 15, 20, 30] points = 99 for a in aas: w = wavelets.ricker(points, a) vec = np.arange(0, points) - (points - 1.0) / 2 exp_zero1 = np.argmin(np.abs(vec - a)) exp_zero2 = np.argmin(np.abs(vec + a)) assert_array_almost_equal(w[exp_zero1], 0) assert_array_almost_equal(w[exp_zero2], 0) def test_cwt(self): widths = [1.0] def delta_wavelet(s, t): return np.array([1]) len_data = 100 test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0) #Test delta function input gives same data as output cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths) assert_(cwt_dat.shape == (len(widths), len_data)) assert_array_almost_equal(test_data, cwt_dat.flatten()) #Check proper shape on output widths = [1, 3, 4, 5, 10] cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths) assert_(cwt_dat.shape == (len(widths), len_data)) widths = [len_data * 10] #Note: this wavelet isn't defined quite right, but is fine for this test def flat_wavelet(l, w): return np.full(w, 1 / w) cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths) assert_array_almost_equal(cwt_dat, np.mean(test_data))
5,977
37.818182
80
py
scipy
scipy-main/scipy/signal/tests/test_result_type.py
# Regressions tests on result types of some signal functions import numpy as np from numpy.testing import assert_ from scipy.signal import (decimate, lfilter_zi, lfiltic, sos2tf, sosfilt_zi) def test_decimate(): ones_f32 = np.ones(32, dtype=np.float32) assert_(decimate(ones_f32, 2).dtype == np.float32) ones_i64 = np.ones(32, dtype=np.int64) assert_(decimate(ones_i64, 2).dtype == np.float64) def test_lfilter_zi(): b_f32 = np.array([1, 2, 3], dtype=np.float32) a_f32 = np.array([4, 5, 6], dtype=np.float32) assert_(lfilter_zi(b_f32, a_f32).dtype == np.float32) def test_lfiltic(): # this would return f32 when given a mix of f32 / f64 args b_f32 = np.array([1, 2, 3], dtype=np.float32) a_f32 = np.array([4, 5, 6], dtype=np.float32) x_f32 = np.ones(32, dtype=np.float32) b_f64 = b_f32.astype(np.float64) a_f64 = a_f32.astype(np.float64) x_f64 = x_f32.astype(np.float64) assert_(lfiltic(b_f64, a_f32, x_f32).dtype == np.float64) assert_(lfiltic(b_f32, a_f64, x_f32).dtype == np.float64) assert_(lfiltic(b_f32, a_f32, x_f64).dtype == np.float64) assert_(lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64) def test_sos2tf(): sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32) b, a = sos2tf(sos_f32) assert_(b.dtype == np.float32) assert_(a.dtype == np.float32) def test_sosfilt_zi(): sos_f32 = np.array([[4, 5, 6, 1, 2, 3]], dtype=np.float32) assert_(sosfilt_zi(sos_f32).dtype == np.float32)
1,627
29.716981
68
py
scipy
scipy-main/scipy/signal/tests/test_windows.py
import pickle import numpy as np from numpy import array from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal, assert_, assert_array_less, suppress_warnings) import pytest from pytest import raises as assert_raises from scipy.fft import fft from scipy.signal import windows, get_window, resample, hann as dep_hann from scipy import signal window_funcs = [ ('boxcar', ()), ('triang', ()), ('parzen', ()), ('bohman', ()), ('blackman', ()), ('nuttall', ()), ('blackmanharris', ()), ('flattop', ()), ('bartlett', ()), ('barthann', ()), ('hamming', ()), ('kaiser', (1,)), ('dpss', (2,)), ('gaussian', (0.5,)), ('general_gaussian', (1.5, 2)), ('chebwin', (1,)), ('cosine', ()), ('hann', ()), ('exponential', ()), ('taylor', ()), ('tukey', (0.5,)), ('lanczos', ()), ] @pytest.mark.parametrize(["method", "args"], window_funcs) def test_deprecated_import(method, args): if method in ('taylor', 'lanczos', 'dpss'): pytest.skip("Deprecation test not applicable") func = getattr(signal, method) msg = f"Importing {method}" with pytest.deprecated_call(match=msg): func(1, *args) class TestBartHann: def test_basic(self): assert_allclose(windows.barthann(6, sym=True), [0, 0.35857354213752, 0.8794264578624801, 0.8794264578624801, 0.3585735421375199, 0], rtol=1e-15, atol=1e-15) assert_allclose(windows.barthann(7), [0, 0.27, 0.73, 1.0, 0.73, 0.27, 0], rtol=1e-15, atol=1e-15) assert_allclose(windows.barthann(6, False), [0, 0.27, 0.73, 1.0, 0.73, 0.27], rtol=1e-15, atol=1e-15) class TestBartlett: def test_basic(self): assert_allclose(windows.bartlett(6), [0, 0.4, 0.8, 0.8, 0.4, 0]) assert_allclose(windows.bartlett(7), [0, 1/3, 2/3, 1.0, 2/3, 1/3, 0]) assert_allclose(windows.bartlett(6, False), [0, 1/3, 2/3, 1.0, 2/3, 1/3]) class TestBlackman: def test_basic(self): assert_allclose(windows.blackman(6, sym=False), [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14) assert_allclose(windows.blackman(7, sym=False), [0, 0.09045342435412804, 0.4591829575459636, 0.9203636180999081, 0.9203636180999081, 0.4591829575459636, 0.09045342435412804], atol=1e-8) assert_allclose(windows.blackman(6), [0, 0.2007701432625305, 0.8492298567374694, 0.8492298567374694, 0.2007701432625305, 0], atol=1e-14) assert_allclose(windows.blackman(7, True), [0, 0.13, 0.63, 1.0, 0.63, 0.13, 0], atol=1e-14) class TestBlackmanHarris: def test_basic(self): assert_allclose(windows.blackmanharris(6, False), [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645]) assert_allclose(windows.blackmanharris(7, sym=False), [6.0e-05, 0.03339172347815117, 0.332833504298565, 0.8893697722232837, 0.8893697722232838, 0.3328335042985652, 0.03339172347815122]) assert_allclose(windows.blackmanharris(6), [6.0e-05, 0.1030114893456638, 0.7938335106543362, 0.7938335106543364, 0.1030114893456638, 6.0e-05]) assert_allclose(windows.blackmanharris(7, sym=True), [6.0e-05, 0.055645, 0.520575, 1.0, 0.520575, 0.055645, 6.0e-05]) class TestTaylor: def test_normalized(self): """Tests windows of small length that are normalized to 1. See the documentation for the Taylor window for more information on normalization. """ assert_allclose(windows.taylor(1, 2, 15), 1.0) assert_allclose( windows.taylor(5, 2, 15), np.array([0.75803341, 0.90757699, 1.0, 0.90757699, 0.75803341]) ) assert_allclose( windows.taylor(6, 2, 15), np.array([ 0.7504082, 0.86624416, 0.98208011, 0.98208011, 0.86624416, 0.7504082 ]) ) def test_non_normalized(self): """Test windows of small length that are not normalized to 1. 
See the documentation for the Taylor window for more information on normalization. """ assert_allclose( windows.taylor(5, 2, 15, norm=False), np.array([ 0.87508054, 1.04771499, 1.15440894, 1.04771499, 0.87508054 ]) ) assert_allclose( windows.taylor(6, 2, 15, norm=False), np.array([ 0.86627793, 1.0, 1.13372207, 1.13372207, 1.0, 0.86627793 ]) ) def test_correctness(self): """This test ensures the correctness of the implemented Taylor Windowing function. A Taylor Window of 1024 points is created, its FFT is taken, and the Peak Sidelobe Level (PSLL) and 3dB and 18dB bandwidth are found and checked. A publication from Sandia National Laboratories was used as reference for the correctness values [1]_. References ----- .. [1] Armin Doerry, "Catalog of Window Taper Functions for Sidelobe Control", 2017. https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf """ M_win = 1024 N_fft = 131072 # Set norm=False for correctness as the values obtained from the # scientific publication do not normalize the values. Normalizing # changes the sidelobe level from the desired value. w = windows.taylor(M_win, nbar=4, sll=35, norm=False, sym=False) f = fft(w, N_fft) spec = 20 * np.log10(np.abs(f / np.amax(f))) first_zero = np.argmax(np.diff(spec) > 0) PSLL = np.amax(spec[first_zero:-first_zero]) BW_3dB = 2*np.argmax(spec <= -3.0102999566398121) / N_fft * M_win BW_18dB = 2*np.argmax(spec <= -18.061799739838872) / N_fft * M_win assert_allclose(PSLL, -35.1672, atol=1) assert_allclose(BW_3dB, 1.1822, atol=0.1) assert_allclose(BW_18dB, 2.6112, atol=0.1) class TestBohman: def test_basic(self): assert_allclose(windows.bohman(6), [0, 0.1791238937062839, 0.8343114522576858, 0.8343114522576858, 0.1791238937062838, 0]) assert_allclose(windows.bohman(7, sym=True), [0, 0.1089977810442293, 0.6089977810442293, 1.0, 0.6089977810442295, 0.1089977810442293, 0]) assert_allclose(windows.bohman(6, False), [0, 0.1089977810442293, 0.6089977810442293, 1.0, 0.6089977810442295, 0.1089977810442293]) class TestBoxcar: def test_basic(self): assert_allclose(windows.boxcar(6), [1, 1, 1, 1, 1, 1]) assert_allclose(windows.boxcar(7), [1, 1, 1, 1, 1, 1, 1]) assert_allclose(windows.boxcar(6, False), [1, 1, 1, 1, 1, 1]) cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, 0.198891, 0.235450, 0.274846, 0.316836, 0.361119, 0.407338, 0.455079, 0.503883, 0.553248, 0.602637, 0.651489, 0.699227, 0.745266, 0.789028, 0.829947, 0.867485, 0.901138, 0.930448, 0.955010, 0.974482, 0.988591, 0.997138, 1.000000, 0.997138, 0.988591, 0.974482, 0.955010, 0.930448, 0.901138, 0.867485, 0.829947, 0.789028, 0.745266, 0.699227, 0.651489, 0.602637, 0.553248, 0.503883, 0.455079, 0.407338, 0.361119, 0.316836, 0.274846, 0.235450, 0.198891, 0.165348, 0.134941, 0.107729, 0.200938]) cheb_even_true = array([0.203894, 0.107279, 0.133904, 0.163608, 0.196338, 0.231986, 0.270385, 0.311313, 0.354493, 0.399594, 0.446233, 0.493983, 0.542378, 0.590916, 0.639071, 0.686302, 0.732055, 0.775783, 0.816944, 0.855021, 0.889525, 0.920006, 0.946060, 0.967339, 0.983557, 0.994494, 1.000000, 1.000000, 0.994494, 0.983557, 0.967339, 0.946060, 0.920006, 0.889525, 0.855021, 0.816944, 0.775783, 0.732055, 0.686302, 0.639071, 0.590916, 0.542378, 0.493983, 0.446233, 0.399594, 0.354493, 0.311313, 0.270385, 0.231986, 0.196338, 0.163608, 0.133904, 0.107279, 0.203894]) class TestChebWin: def test_basic(self): with suppress_warnings() as sup: 
sup.filter(UserWarning, "This window is not suitable") assert_allclose(windows.chebwin(6, 100), [0.1046401879356917, 0.5075781475823447, 1.0, 1.0, 0.5075781475823447, 0.1046401879356917]) assert_allclose(windows.chebwin(7, 100), [0.05650405062850233, 0.316608530648474, 0.7601208123539079, 1.0, 0.7601208123539079, 0.316608530648474, 0.05650405062850233]) assert_allclose(windows.chebwin(6, 10), [1.0, 0.6071201674458373, 0.6808391469897297, 0.6808391469897297, 0.6071201674458373, 1.0]) assert_allclose(windows.chebwin(7, 10), [1.0, 0.5190521247588651, 0.5864059018130382, 0.6101519801307441, 0.5864059018130382, 0.5190521247588651, 1.0]) assert_allclose(windows.chebwin(6, 10, False), [1.0, 0.5190521247588651, 0.5864059018130382, 0.6101519801307441, 0.5864059018130382, 0.5190521247588651]) def test_cheb_odd_high_attenuation(self): with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") cheb_odd = windows.chebwin(53, at=-40) assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) def test_cheb_even_high_attenuation(self): with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") cheb_even = windows.chebwin(54, at=40) assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) def test_cheb_odd_low_attenuation(self): cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405, 0.610151, 0.586405, 0.519052, 1.000000]) with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") cheb_odd = windows.chebwin(7, at=10) assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4) def test_cheb_even_low_attenuation(self): cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027, 0.541338, 0.541338, 0.51027, 0.451924, 1.000000]) with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") cheb_even = windows.chebwin(8, at=-10) assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4) exponential_data = { (4, None, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03]), (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988, 0.0820849986238988, 0.00055308437014783]), (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233]), (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342, 0.60653065971263342, 0.22313016014842982]), (4, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03]), (4, 2, 0.2, True): None, (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233]), (4, 2, 1.0, True): None, (5, None, 0.2, True): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03, 4.53999297624848542e-05]), (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233, 0.1353352832366127]), (5, 2, 0.2, True): None, (5, 2, 1.0, True): None } def test_exponential(): for k, v in exponential_data.items(): if v is None: assert_raises(ValueError, windows.exponential, *k) else: win = windows.exponential(*k) assert_allclose(win, v, rtol=1e-14) class TestFlatTop: def test_basic(self): assert_allclose(windows.flattop(6, sym=False), [-0.000421051, -0.051263156, 0.19821053, 1.0, 0.19821053, -0.051263156]) assert_allclose(windows.flattop(7, sym=False), [-0.000421051, -0.03684078115492348, 0.01070371671615342, 0.7808739149387698, 0.7808739149387698, 0.01070371671615342, 
-0.03684078115492348]) assert_allclose(windows.flattop(6), [-0.000421051, -0.0677142520762119, 0.6068721525762117, 0.6068721525762117, -0.0677142520762119, -0.000421051]) assert_allclose(windows.flattop(7, True), [-0.000421051, -0.051263156, 0.19821053, 1.0, 0.19821053, -0.051263156, -0.000421051]) class TestGaussian: def test_basic(self): assert_allclose(windows.gaussian(6, 1.0), [0.04393693362340742, 0.3246524673583497, 0.8824969025845955, 0.8824969025845955, 0.3246524673583497, 0.04393693362340742]) assert_allclose(windows.gaussian(7, 1.2), [0.04393693362340742, 0.2493522087772962, 0.7066482778577162, 1.0, 0.7066482778577162, 0.2493522087772962, 0.04393693362340742]) assert_allclose(windows.gaussian(7, 3), [0.6065306597126334, 0.8007374029168081, 0.9459594689067654, 1.0, 0.9459594689067654, 0.8007374029168081, 0.6065306597126334]) assert_allclose(windows.gaussian(6, 3, False), [0.6065306597126334, 0.8007374029168081, 0.9459594689067654, 1.0, 0.9459594689067654, 0.8007374029168081]) class TestGeneralCosine: def test_basic(self): assert_allclose(windows.general_cosine(5, [0.5, 0.3, 0.2]), [0.4, 0.3, 1, 0.3, 0.4]) assert_allclose(windows.general_cosine(4, [0.5, 0.3, 0.2], sym=False), [0.4, 0.3, 1, 0.3]) class TestGeneralHamming: def test_basic(self): assert_allclose(windows.general_hamming(5, 0.7), [0.4, 0.7, 1.0, 0.7, 0.4]) assert_allclose(windows.general_hamming(5, 0.75, sym=False), [0.5, 0.6727457514, 0.9522542486, 0.9522542486, 0.6727457514]) assert_allclose(windows.general_hamming(6, 0.75, sym=True), [0.5, 0.6727457514, 0.9522542486, 0.9522542486, 0.6727457514, 0.5]) class TestHamming: def test_basic(self): assert_allclose(windows.hamming(6, False), [0.08, 0.31, 0.77, 1.0, 0.77, 0.31]) assert_allclose(windows.hamming(7, sym=False), [0.08, 0.2531946911449826, 0.6423596296199047, 0.9544456792351128, 0.9544456792351128, 0.6423596296199047, 0.2531946911449826]) assert_allclose(windows.hamming(6), [0.08, 0.3978521825875242, 0.9121478174124757, 0.9121478174124757, 0.3978521825875242, 0.08]) assert_allclose(windows.hamming(7, sym=True), [0.08, 0.31, 0.77, 1.0, 0.77, 0.31, 0.08]) class TestHann: def test_basic(self): assert_allclose(windows.hann(6, sym=False), [0, 0.25, 0.75, 1.0, 0.75, 0.25], rtol=1e-15, atol=1e-15) assert_allclose(windows.hann(7, sym=False), [0, 0.1882550990706332, 0.6112604669781572, 0.9504844339512095, 0.9504844339512095, 0.6112604669781572, 0.1882550990706332], rtol=1e-15, atol=1e-15) assert_allclose(windows.hann(6, True), [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737, 0.3454915028125263, 0], rtol=1e-15, atol=1e-15) assert_allclose(windows.hann(7), [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], rtol=1e-15, atol=1e-15) class TestKaiser: def test_basic(self): assert_allclose(windows.kaiser(6, 0.5), [0.9403061933191572, 0.9782962393705389, 0.9975765035372042, 0.9975765035372042, 0.9782962393705389, 0.9403061933191572]) assert_allclose(windows.kaiser(7, 0.5), [0.9403061933191572, 0.9732402256999829, 0.9932754654413773, 1.0, 0.9932754654413773, 0.9732402256999829, 0.9403061933191572]) assert_allclose(windows.kaiser(6, 2.7), [0.2603047507678832, 0.6648106293528054, 0.9582099802511439, 0.9582099802511439, 0.6648106293528054, 0.2603047507678832]) assert_allclose(windows.kaiser(7, 2.7), [0.2603047507678832, 0.5985765418119844, 0.8868495172060835, 1.0, 0.8868495172060835, 0.5985765418119844, 0.2603047507678832]) assert_allclose(windows.kaiser(6, 2.7, False), [0.2603047507678832, 0.5985765418119844, 0.8868495172060835, 1.0, 0.8868495172060835, 0.5985765418119844]) class 
TestKaiserBesselDerived: def test_basic(self): M = 100 w = windows.kaiser_bessel_derived(M, beta=4.0) w2 = windows.get_window(('kaiser bessel derived', 4.0), M, fftbins=False) assert_allclose(w, w2) # Test for Princen-Bradley condition assert_allclose(w[:M // 2] ** 2 + w[-M // 2:] ** 2, 1.) # Test actual values from other implementations # M = 2: sqrt(2) / 2 # M = 4: 0.518562710536, 0.855039598640 # M = 6: 0.436168993154, 0.707106781187, 0.899864772847 # Ref:https://github.com/scipy/scipy/pull/4747#issuecomment-172849418 assert_allclose(windows.kaiser_bessel_derived(2, beta=np.pi / 2)[:1], np.sqrt(2) / 2) assert_allclose(windows.kaiser_bessel_derived(4, beta=np.pi / 2)[:2], [0.518562710536, 0.855039598640]) assert_allclose(windows.kaiser_bessel_derived(6, beta=np.pi / 2)[:3], [0.436168993154, 0.707106781187, 0.899864772847]) def test_exceptions(self): M = 100 # Assert ValueError for odd window length msg = ("Kaiser-Bessel Derived windows are only defined for even " "number of points") with assert_raises(ValueError, match=msg): windows.kaiser_bessel_derived(M + 1, beta=4.) # Assert ValueError for non-symmetric setting msg = ("Kaiser-Bessel Derived windows are only defined for " "symmetric shapes") with assert_raises(ValueError, match=msg): windows.kaiser_bessel_derived(M + 1, beta=4., sym=False) class TestNuttall: def test_basic(self): assert_allclose(windows.nuttall(6, sym=False), [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, 0.0613345]) assert_allclose(windows.nuttall(7, sym=False), [0.0003628, 0.03777576895352025, 0.3427276199688195, 0.8918518610776603, 0.8918518610776603, 0.3427276199688196, 0.0377757689535203]) assert_allclose(windows.nuttall(6), [0.0003628, 0.1105152530498718, 0.7982580969501282, 0.7982580969501283, 0.1105152530498719, 0.0003628]) assert_allclose(windows.nuttall(7, True), [0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298, 0.0613345, 0.0003628]) class TestParzen: def test_basic(self): assert_allclose(windows.parzen(6), [0.009259259259259254, 0.25, 0.8611111111111112, 0.8611111111111112, 0.25, 0.009259259259259254]) assert_allclose(windows.parzen(7, sym=True), [0.00583090379008747, 0.1574344023323616, 0.6501457725947521, 1.0, 0.6501457725947521, 0.1574344023323616, 0.00583090379008747]) assert_allclose(windows.parzen(6, False), [0.00583090379008747, 0.1574344023323616, 0.6501457725947521, 1.0, 0.6501457725947521, 0.1574344023323616]) class TestTriang: def test_basic(self): assert_allclose(windows.triang(6, True), [1/6, 1/2, 5/6, 5/6, 1/2, 1/6]) assert_allclose(windows.triang(7), [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4]) assert_allclose(windows.triang(6, sym=False), [1/4, 1/2, 3/4, 1, 3/4, 1/2]) tukey_data = { (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]), (4, 0.9, True): array([0.0, 0.84312081893436686, 0.84312081893436686, 0.0]), (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]), (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]), (4, 0.9, False): array([0.0, 0.58682408883346526, 1.0, 0.58682408883346526]), (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]), (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]), (5, 0.8, True): array([0.0, 0.69134171618254492, 1.0, 0.69134171618254492, 0.0]), (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]), (6, 0): [1, 1, 1, 1, 1, 1], (7, 0): [1, 1, 1, 1, 1, 1, 1], (6, .25): [0, 1, 1, 1, 1, 0], (7, .25): [0, 1, 1, 1, 1, 1, 0], (6,): [0, 0.9045084971874737, 1.0, 1.0, 0.9045084971874735, 0], (7,): [0, 0.75, 1.0, 1.0, 1.0, 0.75, 0], (6, .75): [0, 0.5522642316338269, 1.0, 1.0, 0.5522642316338267, 0], (7, .75): [0, 0.4131759111665348, 0.9698463103929542, 1.0, 
0.9698463103929542, 0.4131759111665347, 0], (6, 1): [0, 0.3454915028125263, 0.9045084971874737, 0.9045084971874737, 0.3454915028125263, 0], (7, 1): [0, 0.25, 0.75, 1.0, 0.75, 0.25, 0], } class TestTukey: def test_basic(self): # Test against hardcoded data for k, v in tukey_data.items(): if v is None: assert_raises(ValueError, windows.tukey, *k) else: win = windows.tukey(*k) assert_allclose(win, v, rtol=1e-15, atol=1e-15) def test_extremes(self): # Test extremes of alpha correspond to boxcar and hann tuk0 = windows.tukey(100, 0) box0 = windows.boxcar(100) assert_array_almost_equal(tuk0, box0) tuk1 = windows.tukey(100, 1) han1 = windows.hann(100) assert_array_almost_equal(tuk1, han1) dpss_data = { # All values from MATLAB: # * taper[1] of (3, 1.4, 3) sign-flipped # * taper[3] of (5, 1.5, 5) sign-flipped (4, 0.1, 2): ([[0.497943898, 0.502047681, 0.502047681, 0.497943898], [0.670487993, 0.224601537, -0.224601537, -0.670487993]], [0.197961815, 0.002035474]), # noqa (3, 1.4, 3): ([[0.410233151, 0.814504464, 0.410233151], [0.707106781, 0.0, -0.707106781], [0.575941629, -0.580157287, 0.575941629]], [0.999998093, 0.998067480, 0.801934426]), # noqa (5, 1.5, 5): ([[0.1745071052, 0.4956749177, 0.669109327, 0.495674917, 0.174507105], [0.4399493348, 0.553574369, 0.0, -0.553574369, -0.439949334], [0.631452756, 0.073280238, -0.437943884, 0.073280238, 0.631452756], [0.553574369, -0.439949334, 0.0, 0.439949334, -0.553574369], [0.266110290, -0.498935248, 0.600414741, -0.498935248, 0.266110290147157]], [0.999728571, 0.983706916, 0.768457889, 0.234159338, 0.013947282907567]), # noqa: E501 (100, 2, 4): ([[0.0030914414, 0.0041266922, 0.005315076, 0.006665149, 0.008184854, 0.0098814158, 0.011761239, 0.013829809, 0.016091597, 0.018549973, 0.02120712, 0.02406396, 0.027120092, 0.030373728, 0.033821651, 0.037459181, 0.041280145, 0.045276872, 0.049440192, 0.053759447, 0.058222524, 0.062815894, 0.067524661, 0.072332638, 0.077222418, 0.082175473, 0.087172252, 0.092192299, 0.097214376, 0.1022166, 0.10717657, 0.11207154, 0.11687856, 0.12157463, 0.12613686, 0.13054266, 0.13476986, 0.13879691, 0.14260302, 0.14616832, 0.14947401, 0.1525025, 0.15523755, 0.15766438, 0.15976981, 0.16154233, 0.16297223, 0.16405162, 0.16477455, 0.16513702, 0.16513702, 0.16477455, 0.16405162, 0.16297223, 0.16154233, 0.15976981, 0.15766438, 0.15523755, 0.1525025, 0.14947401, 0.14616832, 0.14260302, 0.13879691, 0.13476986, 0.13054266, 0.12613686, 0.12157463, 0.11687856, 0.11207154, 0.10717657, 0.1022166, 0.097214376, 0.092192299, 0.087172252, 0.082175473, 0.077222418, 0.072332638, 0.067524661, 0.062815894, 0.058222524, 0.053759447, 0.049440192, 0.045276872, 0.041280145, 0.037459181, 0.033821651, 0.030373728, 0.027120092, 0.02406396, 0.02120712, 0.018549973, 0.016091597, 0.013829809, 0.011761239, 0.0098814158, 0.008184854, 0.006665149, 0.005315076, 0.0041266922, 0.0030914414], [0.018064449, 0.022040342, 0.026325013, 0.030905288, 0.035764398, 0.040881982, 0.046234148, 0.051793558, 0.057529559, 0.063408356, 0.069393216, 0.075444716, 0.081521022, 0.087578202, 0.093570567, 0.099451049, 0.10517159, 0.11068356, 0.11593818, 0.12088699, 0.12548227, 0.12967752, 0.1334279, 0.13669069, 0.13942569, 0.1415957, 0.14316686, 0.14410905, 0.14439626, 0.14400686, 0.14292389, 0.1411353, 0.13863416, 0.13541876, 0.13149274, 0.12686516, 0.12155045, 0.1155684, 0.10894403, 0.10170748, 0.093893752, 0.08554251, 0.076697768, 0.067407559, 0.057723559, 0.04770068, 0.037396627, 0.026871428, 0.016186944, 0.0054063557, -0.0054063557, -0.016186944, -0.026871428, -0.037396627, 
-0.04770068, -0.057723559, -0.067407559, -0.076697768, -0.08554251, -0.093893752, -0.10170748, -0.10894403, -0.1155684, -0.12155045, -0.12686516, -0.13149274, -0.13541876, -0.13863416, -0.1411353, -0.14292389, -0.14400686, -0.14439626, -0.14410905, -0.14316686, -0.1415957, -0.13942569, -0.13669069, -0.1334279, -0.12967752, -0.12548227, -0.12088699, -0.11593818, -0.11068356, -0.10517159, -0.099451049, -0.093570567, -0.087578202, -0.081521022, -0.075444716, -0.069393216, -0.063408356, -0.057529559, -0.051793558, -0.046234148, -0.040881982, -0.035764398, -0.030905288, -0.026325013, -0.022040342, -0.018064449], [0.064817553, 0.072567801, 0.080292992, 0.087918235, 0.095367076, 0.10256232, 0.10942687, 0.1158846, 0.12186124, 0.12728523, 0.13208858, 0.13620771, 0.13958427, 0.14216587, 0.14390678, 0.14476863, 0.1447209, 0.14374148, 0.14181704, 0.13894336, 0.13512554, 0.13037812, 0.1247251, 0.11819984, 0.11084487, 0.10271159, 0.093859853, 0.084357497, 0.074279719, 0.063708406, 0.052731374, 0.041441525, 0.029935953, 0.018314987, 0.0066811877, -0.0048616765, -0.016209689, -0.027259848, -0.037911124, -0.048065512, -0.05762905, -0.066512804, -0.0746338, -0.081915903, -0.088290621, -0.09369783, -0.098086416, -0.10141482, -0.10365146, -0.10477512, -0.10477512, -0.10365146, -0.10141482, -0.098086416, -0.09369783, -0.088290621, -0.081915903, -0.0746338, -0.066512804, -0.05762905, -0.048065512, -0.037911124, -0.027259848, -0.016209689, -0.0048616765, 0.0066811877, 0.018314987, 0.029935953, 0.041441525, 0.052731374, 0.063708406, 0.074279719, 0.084357497, 0.093859853, 0.10271159, 0.11084487, 0.11819984, 0.1247251, 0.13037812, 0.13512554, 0.13894336, 0.14181704, 0.14374148, 0.1447209, 0.14476863, 0.14390678, 0.14216587, 0.13958427, 0.13620771, 0.13208858, 0.12728523, 0.12186124, 0.1158846, 0.10942687, 0.10256232, 0.095367076, 0.087918235, 0.080292992, 0.072567801, 0.064817553], [0.14985551, 0.15512305, 0.15931467, 0.16236806, 0.16423291, 0.16487165, 0.16426009, 0.1623879, 0.1592589, 0.15489114, 0.14931693, 0.14258255, 0.13474785, 0.1258857, 0.11608124, 0.10543095, 0.094041635, 0.082029213, 0.069517411, 0.056636348, 0.043521028, 0.030309756, 0.017142511, 0.0041592774, -0.0085016282, -0.020705223, -0.032321494, -0.043226982, -0.053306291, -0.062453515, -0.070573544, -0.077583253, -0.083412547, -0.088005244, -0.091319802, -0.093329861, -0.094024602, -0.093408915, -0.091503383, -0.08834406, -0.08398207, -0.078483012, -0.071926192, -0.064403681, -0.056019215, -0.046886954, -0.037130106, -0.026879442, -0.016271713, -0.005448, 0.005448, 0.016271713, 0.026879442, 0.037130106, 0.046886954, 0.056019215, 0.064403681, 0.071926192, 0.078483012, 0.08398207, 0.08834406, 0.091503383, 0.093408915, 0.094024602, 0.093329861, 0.091319802, 0.088005244, 0.083412547, 0.077583253, 0.070573544, 0.062453515, 0.053306291, 0.043226982, 0.032321494, 0.020705223, 0.0085016282, -0.0041592774, -0.017142511, -0.030309756, -0.043521028, -0.056636348, -0.069517411, -0.082029213, -0.094041635, -0.10543095, -0.11608124, -0.1258857, -0.13474785, -0.14258255, -0.14931693, -0.15489114, -0.1592589, -0.1623879, -0.16426009, -0.16487165, -0.16423291, -0.16236806, -0.15931467, -0.15512305, -0.14985551]], [0.999943140, 0.997571533, 0.959465463, 0.721862496]), # noqa: E501 } class TestDPSS: def test_basic(self): # Test against hardcoded data for k, v in dpss_data.items(): win, ratios = windows.dpss(*k, return_ratios=True) assert_allclose(win, v[0], atol=1e-7, err_msg=k) assert_allclose(ratios, v[1], rtol=1e-5, atol=1e-7, err_msg=k) def test_unity(self): # 
Test unity value handling (gh-2221) for M in range(1, 21): # corrected w/approximation (default) win = windows.dpss(M, M / 2.1) expected = M % 2 # one for odd, none for even assert_equal(np.isclose(win, 1.).sum(), expected, err_msg=f'{win}') # corrected w/subsample delay (slower) win_sub = windows.dpss(M, M / 2.1, norm='subsample') if M > 2: # @M=2 the subsample doesn't do anything assert_equal(np.isclose(win_sub, 1.).sum(), expected, err_msg=f'{win_sub}') assert_allclose(win, win_sub, rtol=0.03) # within 3% # not the same, l2-norm win_2 = windows.dpss(M, M / 2.1, norm=2) expected = 1 if M == 1 else 0 assert_equal(np.isclose(win_2, 1.).sum(), expected, err_msg=f'{win_2}') def test_extremes(self): # Test extremes of alpha lam = windows.dpss(31, 6, 4, return_ratios=True)[1] assert_array_almost_equal(lam, 1.) lam = windows.dpss(31, 7, 4, return_ratios=True)[1] assert_array_almost_equal(lam, 1.) lam = windows.dpss(31, 8, 4, return_ratios=True)[1] assert_array_almost_equal(lam, 1.) def test_degenerate(self): # Test failures assert_raises(ValueError, windows.dpss, 4, 1.5, -1) # Bad Kmax assert_raises(ValueError, windows.dpss, 4, 1.5, -5) assert_raises(TypeError, windows.dpss, 4, 1.5, 1.1) assert_raises(ValueError, windows.dpss, 3, 1.5, 3) # NW must be < N/2. assert_raises(ValueError, windows.dpss, 3, -1, 3) # NW must be pos assert_raises(ValueError, windows.dpss, 3, 0, 3) assert_raises(ValueError, windows.dpss, -1, 1, 3) # negative M class TestLanczos: def test_basic(self): # Analytical results: # sinc(x) = sinc(-x) # sinc(pi) = 0, sinc(0) = 1 # Hand computation on WolframAlpha: # sinc(2 pi / 3) = 0.413496672 # sinc(pi / 3) = 0.826993343 # sinc(3 pi / 5) = 0.504551152 # sinc(pi / 5) = 0.935489284 assert_allclose(windows.lanczos(6, sym=False), [0., 0.413496672, 0.826993343, 1., 0.826993343, 0.413496672], atol=1e-9) assert_allclose(windows.lanczos(6), [0., 0.504551152, 0.935489284, 0.935489284, 0.504551152, 0.], atol=1e-9) assert_allclose(windows.lanczos(7, sym=True), [0., 0.413496672, 0.826993343, 1., 0.826993343, 0.413496672, 0.], atol=1e-9) def test_array_size(self): for n in [0, 10, 11]: assert_equal(len(windows.lanczos(n, sym=False)), n) assert_equal(len(windows.lanczos(n, sym=True)), n) class TestGetWindow: def test_boxcar(self): w = windows.get_window('boxcar', 12) assert_array_equal(w, np.ones_like(w)) # window is a tuple of len 1 w = windows.get_window(('boxcar',), 16) assert_array_equal(w, np.ones_like(w)) def test_cheb_odd(self): with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") w = windows.get_window(('chebwin', -40), 53, fftbins=False) assert_array_almost_equal(w, cheb_odd_true, decimal=4) def test_cheb_even(self): with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") w = windows.get_window(('chebwin', 40), 54, fftbins=False) assert_array_almost_equal(w, cheb_even_true, decimal=4) def test_dpss(self): win1 = windows.get_window(('dpss', 3), 64, fftbins=False) win2 = windows.dpss(64, 3) assert_array_almost_equal(win1, win2, decimal=4) def test_kaiser_float(self): win1 = windows.get_window(7.2, 64) win2 = windows.kaiser(64, 7.2, False) assert_allclose(win1, win2) def test_invalid_inputs(self): # Window is not a float, tuple, or string assert_raises(ValueError, windows.get_window, set('hann'), 8) # Unknown window type error assert_raises(ValueError, windows.get_window, 'broken', 4) def test_array_as_window(self): # github issue 3603 osfactor = 128 sig = np.arange(128) win = windows.get_window(('kaiser', 8.0), osfactor 
// 2) with assert_raises(ValueError, match='must have the same length'): resample(sig, len(sig) * osfactor, window=win) def test_general_cosine(self): assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4), [0.4, 0.3, 1, 0.3]) assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4, fftbins=False), [0.4, 0.55, 0.55, 0.4]) def test_general_hamming(self): assert_allclose(get_window(('general_hamming', 0.7), 5), [0.4, 0.6072949, 0.9427051, 0.9427051, 0.6072949]) assert_allclose(get_window(('general_hamming', 0.7), 5, fftbins=False), [0.4, 0.7, 1.0, 0.7, 0.4]) def test_lanczos(self): assert_allclose(get_window('lanczos', 6), [0., 0.413496672, 0.826993343, 1., 0.826993343, 0.413496672], atol=1e-9) assert_allclose(get_window('lanczos', 6, fftbins=False), [0., 0.504551152, 0.935489284, 0.935489284, 0.504551152, 0.], atol=1e-9) assert_allclose(get_window('lanczos', 6), get_window('sinc', 6)) def test_windowfunc_basics(): for window_name, params in window_funcs: window = getattr(windows, window_name) with suppress_warnings() as sup: sup.filter(UserWarning, "This window is not suitable") # Check symmetry for odd and even lengths w1 = window(8, *params, sym=True) w2 = window(7, *params, sym=False) assert_array_almost_equal(w1[:-1], w2) w1 = window(9, *params, sym=True) w2 = window(8, *params, sym=False) assert_array_almost_equal(w1[:-1], w2) # Check that functions run and output lengths are correct assert_equal(len(window(6, *params, sym=True)), 6) assert_equal(len(window(6, *params, sym=False)), 6) assert_equal(len(window(7, *params, sym=True)), 7) assert_equal(len(window(7, *params, sym=False)), 7) # Check invalid lengths assert_raises(ValueError, window, 5.5, *params) assert_raises(ValueError, window, -7, *params) # Check degenerate cases assert_array_equal(window(0, *params, sym=True), []) assert_array_equal(window(0, *params, sym=False), []) assert_array_equal(window(1, *params, sym=True), [1]) assert_array_equal(window(1, *params, sym=False), [1]) # Check dtype assert_(window(0, *params, sym=True).dtype == 'float') assert_(window(0, *params, sym=False).dtype == 'float') assert_(window(1, *params, sym=True).dtype == 'float') assert_(window(1, *params, sym=False).dtype == 'float') assert_(window(6, *params, sym=True).dtype == 'float') assert_(window(6, *params, sym=False).dtype == 'float') # Check normalization assert_array_less(window(10, *params, sym=True), 1.01) assert_array_less(window(10, *params, sym=False), 1.01) assert_array_less(window(9, *params, sym=True), 1.01) assert_array_less(window(9, *params, sym=False), 1.01) # Check that DFT-even spectrum is purely real for odd and even assert_allclose(fft(window(10, *params, sym=False)).imag, 0, atol=1e-14) assert_allclose(fft(window(11, *params, sym=False)).imag, 0, atol=1e-14) def test_needs_params(): for winstr in ['kaiser', 'ksr', 'kaiser_bessel_derived', 'kbd', 'gaussian', 'gauss', 'gss', 'general gaussian', 'general_gaussian', 'general gauss', 'general_gauss', 'ggs', 'dss', 'dpss', 'general cosine', 'general_cosine', 'chebwin', 'cheb', 'general hamming', 'general_hamming', ]: assert_raises(ValueError, get_window, winstr, 7) def test_not_needs_params(): for winstr in ['barthann', 'bartlett', 'blackman', 'blackmanharris', 'bohman', 'boxcar', 'cosine', 'flattop', 'hamming', 'nuttall', 'parzen', 'taylor', 'exponential', 'poisson', 'tukey', 'tuk', 'triangle', 'lanczos', 'sinc', ]: win = get_window(winstr, 7) assert_equal(len(win), 7) def test_deprecation(): if dep_hann.__doc__ is not None: # can be None with `-OO` mode 
assert_('signal.hann is deprecated' in dep_hann.__doc__) assert_('deprecated' not in windows.hann.__doc__) def test_deprecated_pickleable(): dep_hann2 = pickle.loads(pickle.dumps(dep_hann)) assert_(dep_hann2 is dep_hann) def test_symmetric(): for win in [windows.lanczos]: # Even sampling points w = win(4096) error = np.max(np.abs(w-np.flip(w))) assert_equal(error, 0.0) # Odd sampling points w = win(4097) error = np.max(np.abs(w-np.flip(w))) assert_equal(error, 0.0)
41,738
46.920781
5,222
py
scipy
scipy-main/scipy/signal/tests/test_bsplines.py
# pylint: disable=missing-docstring import numpy as np from numpy import array from numpy.testing import (assert_allclose, assert_array_equal, assert_almost_equal, suppress_warnings) import pytest from pytest import raises import scipy.signal._bsplines as bsp from scipy import signal class TestBSplines: """Test behaviors of B-splines. Some of the values tested against were returned as of SciPy 1.1.0 and are included for regression testing purposes. Others (at integer points) are compared to theoretical expressions (cf. Unser, Aldroubi, Eden, IEEE TSP 1993, Table 1).""" def test_spline_filter(self): np.random.seed(12457) # Test the type-error branch raises(TypeError, bsp.spline_filter, array([0]), 0) # Test the real branch np.random.seed(12457) data_array_real = np.random.rand(12, 12) # make the magnitude exceed 1, and make some negative data_array_real = 10*(1-2*data_array_real) result_array_real = array( [[-.463312621, 8.33391222, .697290949, 5.28390836, 5.92066474, 6.59452137, 9.84406950, -8.78324188, 7.20675750, -8.17222994, -4.38633345, 9.89917069], [2.67755154, 6.24192170, -3.15730578, 9.87658581, -9.96930425, 3.17194115, -4.50919947, 5.75423446, 9.65979824, -8.29066885, .971416087, -2.38331897], [-7.08868346, 4.89887705, -1.37062289, 7.70705838, 2.51526461, 3.65885497, 5.16786604, -8.77715342e-03, 4.10533325, 9.04761993, -.577960351, 9.86382519], [-4.71444301, -1.68038985, 2.84695116, 1.14315938, -3.17127091, 1.91830461, 7.13779687, -5.35737482, -9.66586425, -9.87717456, 9.93160672, 4.71948144], [9.49551194, -1.92958436, 6.25427993, -9.05582911, 3.97562282, 7.68232426, -1.04514824, -5.86021443, -8.43007451, 5.47528997, 2.06330736, -8.65968112], [-8.91720100, 8.87065356, 3.76879937, 2.56222894, -.828387146, 8.72288903, 6.42474741, -6.84576083, 9.94724115, 6.90665380, -6.61084494, -9.44907391], [9.25196790, -.774032030, 7.05371046, -2.73505725, 2.53953305, -1.82889155, 2.95454824, -1.66362046, 5.72478916, -3.10287679, 1.54017123, -7.87759020], [-3.98464539, -2.44316992, -1.12708657, 1.01725672, -8.89294671, -5.42145629, -6.16370321, 2.91775492, 9.64132208, .702499998, -2.02622392, 1.56308431], [-2.22050773, 7.89951554, 5.98970713, -7.35861835, 5.45459283, -7.76427957, 3.67280490, -4.05521315, 4.51967507, -3.22738749, -3.65080177, 3.05630155], [-6.21240584, -.296796126, -8.34800163, 9.21564563, -3.61958784, -4.77120006, -3.99454057, 1.05021988e-03, -6.95982829, 6.04380797, 8.43181250, -2.71653339], [1.19638037, 6.99718842e-02, 6.72020394, -2.13963198, 3.75309875, -5.70076744, 5.92143551, -7.22150575, -3.77114594, -1.11903194, -5.39151466, 3.06620093], [9.86326886, 1.05134482, -7.75950607, -3.64429655, 7.81848957, -9.02270373, 3.73399754, -4.71962549, -7.71144306, 3.78263161, 6.46034818, -4.43444731]]) assert_allclose(bsp.spline_filter(data_array_real, 0), result_array_real) def test_bspline(self): # Verify with theoretical results at integer points up to order 5 with suppress_warnings() as sup: sup.filter(DeprecationWarning) assert_allclose(bsp.bspline([-1, 0, 1], 0), array([0, 1, 0])) assert_allclose(bsp.bspline([-1, 0, 1], 1), array([0, 1, 0])) assert_allclose(bsp.bspline([-2, -1, 0, 1, 2], 2), array([0, 1, 6, 1, 0])/8.) assert_allclose(bsp.bspline([-2, -1, 0, 1, 2], 3), array([0, 1, 4, 1, 0])/6.) assert_allclose(bsp.bspline([-3, -2, -1, 0, 1, 2, 3], 4), array([0, 1, 76, 230, 76, 1, 0])/384.) assert_allclose(bsp.bspline([-3, -2, -1, 0, 1, 2, 3], 5), array([0, 1, 26, 66, 26, 1, 0]) / 120.) 
# Compare with SciPy 1.1.0 np.random.seed(12458) assert_allclose(bsp.bspline(np.random.rand(1, 1), 2), array([[0.73694695]])) def test_gauss_spline(self): np.random.seed(12459) assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342) assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217])) def test_gauss_spline_list(self): # regression test for gh-12152 (accept array_like) knots = [-1.0, 0.0, -1.0] assert_almost_equal(bsp.gauss_spline(knots, 3), array([0.15418033, 0.6909883, 0.15418033])) def test_cubic(self): with suppress_warnings() as sup: sup.filter(DeprecationWarning) np.random.seed(12460) # Verify with theoretical results at integer points (see docstring) assert_allclose(bsp.cubic([-2, -1, 0, 1, 2]), array([0, 1, 4, 1, 0])/6.) def test_quadratic(self): np.random.seed(12461) with suppress_warnings() as sup: sup.filter(DeprecationWarning) # Verify correct results at integer points assert_allclose(bsp.quadratic([-2, -1, 0, 1, 2]), array([0, 1, 6, 1, 0])/8.) def test_cspline1d(self): np.random.seed(12462) assert_array_equal(bsp.cspline1d(array([0])), [0.]) c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378, 4.78893826]) # test lamda != 0 assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d) c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812, 5.21051638]) assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0) def test_qspline1d(self): np.random.seed(12463) assert_array_equal(bsp.qspline1d(array([0])), [0.]) # test lamda != 0 raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.) raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.) q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055, 5.14634135]) assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0) def test_cspline1d_eval(self): np.random.seed(12464) assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.])) assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []), array([])) x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6] dx = x[1]-x[0] newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., 12.5] y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879, 1.396, 4.094]) cj = bsp.cspline1d(y) newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068, 4.21600281, 6.04643068, 6.864, 5.16924703, 3.514, 4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433, 7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396, 2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879, 7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759, 6.80717667, 6.203, 4.41570658]) assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) def test_qspline1d_eval(self): np.random.seed(12465) assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.])) assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []), array([])) x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6] dx = x[1]-x[0] newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6., 6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., 12.5] y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879, 1.396, 4.094]) cj = bsp.qspline1d(y) newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915, 4.21600002, 5.91436915, 6.864, 5.18390821, 3.514, 4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433, 7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 
1.396, 2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879, 7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759, 6.71900226, 6.203, 4.49418159]) assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy) def test_sepfir2d_invalid_filter(): filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0]) image = np.random.rand(7, 9) # No error for odd lengths signal.sepfir2d(image, filt, filt[2:]) # Row or column filter must be odd with pytest.raises(ValueError, match="odd length"): signal.sepfir2d(image, filt, filt[1:]) with pytest.raises(ValueError, match="odd length"): signal.sepfir2d(image, filt[1:], filt) # Filters must be 1-dimensional with pytest.raises(ValueError, match="object too deep"): signal.sepfir2d(image, filt.reshape(1, -1), filt) with pytest.raises(ValueError, match="object too deep"): signal.sepfir2d(image, filt, filt.reshape(1, -1)) def test_sepfir2d_invalid_image(): filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0]) image = np.random.rand(8, 8) # Image must be 2 dimensional with pytest.raises(ValueError, match="object too deep"): signal.sepfir2d(image.reshape(4, 4, 4), filt, filt) with pytest.raises(ValueError, match="object of too small depth"): signal.sepfir2d(image[0], filt, filt) def test_cspline2d(): np.random.seed(181819142) image = np.random.rand(71, 73) signal.cspline2d(image, 8.0) def test_qspline2d(): np.random.seed(181819143) image = np.random.rand(71, 73) signal.qspline2d(image)
10,621
46.419643
79
py
scipy
scipy-main/scipy/signal/tests/test_upfirdn.py
# Code adapted from "upfirdn" python library with permission: # # Copyright (c) 2009, Motorola, Inc # # All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Motorola nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy as np from itertools import product from numpy.testing import assert_equal, assert_allclose from pytest import raises as assert_raises import pytest from scipy.signal import upfirdn, firwin from scipy.signal._upfirdn import _output_len, _upfirdn_modes from scipy.signal._upfirdn_apply import _pad_test def upfirdn_naive(x, h, up=1, down=1): """Naive upfirdn processing in Python. Note: arg order (x, h) differs to facilitate apply_along_axis use. 
""" h = np.asarray(h) out = np.zeros(len(x) * up, x.dtype) out[::up] = x out = np.convolve(h, out)[::down][:_output_len(len(h), len(x), up, down)] return out class UpFIRDnCase: """Test _UpFIRDn object""" def __init__(self, up, down, h, x_dtype): self.up = up self.down = down self.h = np.atleast_1d(h) self.x_dtype = x_dtype self.rng = np.random.RandomState(17) def __call__(self): # tiny signal self.scrub(np.ones(1, self.x_dtype)) # ones self.scrub(np.ones(10, self.x_dtype)) # ones # randn x = self.rng.randn(10).astype(self.x_dtype) if self.x_dtype in (np.complex64, np.complex128): x += 1j * self.rng.randn(10) self.scrub(x) # ramp self.scrub(np.arange(10).astype(self.x_dtype)) # 3D, random size = (2, 3, 5) x = self.rng.randn(*size).astype(self.x_dtype) if self.x_dtype in (np.complex64, np.complex128): x += 1j * self.rng.randn(*size) for axis in range(len(size)): self.scrub(x, axis=axis) x = x[:, ::2, 1::3].T for axis in range(len(size)): self.scrub(x, axis=axis) def scrub(self, x, axis=-1): yr = np.apply_along_axis(upfirdn_naive, axis, x, self.h, self.up, self.down) want_len = _output_len(len(self.h), x.shape[axis], self.up, self.down) assert yr.shape[axis] == want_len y = upfirdn(self.h, x, self.up, self.down, axis=axis) assert y.shape[axis] == want_len assert y.shape == yr.shape dtypes = (self.h.dtype, x.dtype) if all(d == np.complex64 for d in dtypes): assert_equal(y.dtype, np.complex64) elif np.complex64 in dtypes and np.float32 in dtypes: assert_equal(y.dtype, np.complex64) elif all(d == np.float32 for d in dtypes): assert_equal(y.dtype, np.float32) elif np.complex128 in dtypes or np.complex64 in dtypes: assert_equal(y.dtype, np.complex128) else: assert_equal(y.dtype, np.float64) assert_allclose(yr, y) _UPFIRDN_TYPES = (int, np.float32, np.complex64, float, complex) class TestUpfirdn: def test_valid_input(self): assert_raises(ValueError, upfirdn, [1], [1], 1, 0) # up or down < 1 assert_raises(ValueError, upfirdn, [], [1], 1, 1) # h.ndim != 1 assert_raises(ValueError, upfirdn, [[1]], [1], 1, 1) @pytest.mark.parametrize('len_h', [1, 2, 3, 4, 5]) @pytest.mark.parametrize('len_x', [1, 2, 3, 4, 5]) def test_singleton(self, len_h, len_x): # gh-9844: lengths producing expected outputs h = np.zeros(len_h) h[len_h // 2] = 1. # make h a delta x = np.ones(len_x) y = upfirdn(h, x, 1, 1) want = np.pad(x, (len_h // 2, (len_h - 1) // 2), 'constant') assert_allclose(y, want) def test_shift_x(self): # gh-9844: shifted x can change values? y = upfirdn([1, 1], [1.], 1, 1) assert_allclose(y, [1, 1]) # was [0, 1] in the issue y = upfirdn([1, 1], [0., 1.], 1, 1) assert_allclose(y, [0, 1, 1]) # A bunch of lengths/factors chosen because they exposed differences # between the "old way" and new way of computing length, and then # got `expected` from MATLAB @pytest.mark.parametrize('len_h, len_x, up, down, expected', [ (2, 2, 5, 2, [1, 0, 0, 0]), (2, 3, 6, 3, [1, 0, 1, 0, 1]), (2, 4, 4, 3, [1, 0, 0, 0, 1]), (3, 2, 6, 2, [1, 0, 0, 1, 0]), (4, 11, 3, 5, [1, 0, 0, 1, 0, 0, 1]), ]) def test_length_factors(self, len_h, len_x, up, down, expected): # gh-9844: weird factors h = np.zeros(len_h) h[0] = 1. 
x = np.ones(len_x) y = upfirdn(h, x, up, down) assert_allclose(y, expected) @pytest.mark.parametrize('down, want_len', [ # lengths from MATLAB (2, 5015), (11, 912), (79, 127), ]) def test_vs_convolve(self, down, want_len): # Check that up=1.0 gives same answer as convolve + slicing random_state = np.random.RandomState(17) try_types = (int, np.float32, np.complex64, float, complex) size = 10000 for dtype in try_types: x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) h = firwin(31, 1. / down, window='hamming') yl = upfirdn_naive(x, h, 1, down) y = upfirdn(h, x, up=1, down=down) assert y.shape == (want_len,) assert yl.shape[0] == y.shape[0] assert_allclose(yl, y, atol=1e-7, rtol=1e-7) @pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES) @pytest.mark.parametrize('h', (1., 1j)) @pytest.mark.parametrize('up, down', [(1, 1), (2, 2), (3, 2), (2, 3)]) def test_vs_naive_delta(self, x_dtype, h, up, down): UpFIRDnCase(up, down, h, x_dtype)() @pytest.mark.parametrize('x_dtype', _UPFIRDN_TYPES) @pytest.mark.parametrize('h_dtype', _UPFIRDN_TYPES) @pytest.mark.parametrize('p_max, q_max', list(product((10, 100), (10, 100)))) def test_vs_naive(self, x_dtype, h_dtype, p_max, q_max): tests = self._random_factors(p_max, q_max, h_dtype, x_dtype) for test in tests: test() def _random_factors(self, p_max, q_max, h_dtype, x_dtype): n_rep = 3 longest_h = 25 random_state = np.random.RandomState(17) tests = [] for _ in range(n_rep): # Randomize the up/down factors somewhat p_add = q_max if p_max > q_max else 1 q_add = p_max if q_max > p_max else 1 p = random_state.randint(p_max) + p_add q = random_state.randint(q_max) + q_add # Generate random FIR coefficients len_h = random_state.randint(longest_h) + 1 h = np.atleast_1d(random_state.randint(len_h)) h = h.astype(h_dtype) if h_dtype == complex: h += 1j * random_state.randint(len_h) tests.append(UpFIRDnCase(p, q, h, x_dtype)) return tests @pytest.mark.parametrize('mode', _upfirdn_modes) def test_extensions(self, mode): """Test vs. manually computed results for modes not in numpy's pad.""" x = np.array([1, 2, 3, 1], dtype=float) npre, npost = 6, 6 y = _pad_test(x, npre=npre, npost=npost, mode=mode) if mode == 'antisymmetric': y_expected = np.asarray( [3, 1, -1, -3, -2, -1, 1, 2, 3, 1, -1, -3, -2, -1, 1, 2]) elif mode == 'antireflect': y_expected = np.asarray( [1, 2, 3, 1, -1, 0, 1, 2, 3, 1, -1, 0, 1, 2, 3, 1]) elif mode == 'smooth': y_expected = np.asarray( [-5, -4, -3, -2, -1, 0, 1, 2, 3, 1, -1, -3, -5, -7, -9, -11]) elif mode == "line": lin_slope = (x[-1] - x[0]) / (len(x) - 1) left = x[0] + np.arange(-npre, 0, 1) * lin_slope right = x[-1] + np.arange(1, npost + 1) * lin_slope y_expected = np.concatenate((left, x, right)) else: y_expected = np.pad(x, (npre, npost), mode=mode) assert_allclose(y, y_expected) @pytest.mark.parametrize( 'size, h_len, mode, dtype', product( [8], [4, 5, 26], # include cases with h_len > 2*size _upfirdn_modes, [np.float32, np.float64, np.complex64, np.complex128], ) ) def test_modes(self, size, h_len, mode, dtype): random_state = np.random.RandomState(5) x = random_state.randn(size).astype(dtype) if dtype in (np.complex64, np.complex128): x += 1j * random_state.randn(size) h = np.arange(1, 1 + h_len, dtype=x.real.dtype) y = upfirdn(h, x, up=1, down=1, mode=mode) # expected result: pad the input, filter with zero padding, then crop npad = h_len - 1 if mode in ['antisymmetric', 'antireflect', 'smooth', 'line']: # use _pad_test test function for modes not supported by np.pad. 
xpad = _pad_test(x, npre=npad, npost=npad, mode=mode) else: xpad = np.pad(x, npad, mode=mode) ypad = upfirdn(h, xpad, up=1, down=1, mode='constant') y_expected = ypad[npad:-npad] atol = rtol = np.finfo(dtype).eps * 1e2 assert_allclose(y, y_expected, atol=atol, rtol=rtol) def test_output_len_long_input(): # Regression test for gh-17375. On Windows, a large enough input # that should have been well within the capabilities of 64 bit integers # would result in a 32 bit overflow because of a bug in Cython 0.29.32. len_h = 1001 in_len = 10**8 up = 320 down = 441 out_len = _output_len(len_h, in_len, up, down) # The expected value was computed "by hand" from the formula # (((in_len - 1) * up + len_h) - 1) // down + 1 assert out_len == 72562360
11,240
38.03125
78
py
scipy
scipy-main/scipy/signal/tests/__init__.py
0
0
0
py
scipy
scipy-main/scipy/signal/tests/test_savitzky_golay.py
import pytest import numpy as np from numpy.testing import (assert_allclose, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal) from scipy.ndimage import convolve1d from scipy.signal import savgol_coeffs, savgol_filter from scipy.signal._savitzky_golay import _polyder def check_polyder(p, m, expected): dp = _polyder(p, m) assert_array_equal(dp, expected) def test_polyder(): cases = [ ([5], 0, [5]), ([5], 1, [0]), ([3, 2, 1], 0, [3, 2, 1]), ([3, 2, 1], 1, [6, 2]), ([3, 2, 1], 2, [6]), ([3, 2, 1], 3, [0]), ([[3, 2, 1], [5, 6, 7]], 0, [[3, 2, 1], [5, 6, 7]]), ([[3, 2, 1], [5, 6, 7]], 1, [[6, 2], [10, 6]]), ([[3, 2, 1], [5, 6, 7]], 2, [[6], [10]]), ([[3, 2, 1], [5, 6, 7]], 3, [[0], [0]]), ] for p, m, expected in cases: check_polyder(np.array(p).T, m, np.array(expected).T) #-------------------------------------------------------------------- # savgol_coeffs tests #-------------------------------------------------------------------- def alt_sg_coeffs(window_length, polyorder, pos): """This is an alternative implementation of the SG coefficients. It uses numpy.polyfit and numpy.polyval. The results should be equivalent to those of savgol_coeffs(), but this implementation is slower. window_length should be odd. """ if pos is None: pos = window_length // 2 t = np.arange(window_length) unit = (t == pos).astype(int) h = np.polyval(np.polyfit(t, unit, polyorder), t) return h def test_sg_coeffs_trivial(): # Test a trivial case of savgol_coeffs: polyorder = window_length - 1 h = savgol_coeffs(1, 0) assert_allclose(h, [1]) h = savgol_coeffs(3, 2) assert_allclose(h, [0, 1, 0], atol=1e-10) h = savgol_coeffs(5, 4) assert_allclose(h, [0, 0, 1, 0, 0], atol=1e-10) h = savgol_coeffs(5, 4, pos=1) assert_allclose(h, [0, 0, 0, 1, 0], atol=1e-10) h = savgol_coeffs(5, 4, pos=1, use='dot') assert_allclose(h, [0, 1, 0, 0, 0], atol=1e-10) def compare_coeffs_to_alt(window_length, order): # For the given window_length and order, compare the results # of savgol_coeffs and alt_sg_coeffs for pos from 0 to window_length - 1. # Also include pos=None. for pos in [None] + list(range(window_length)): h1 = savgol_coeffs(window_length, order, pos=pos, use='dot') h2 = alt_sg_coeffs(window_length, order, pos=pos) assert_allclose(h1, h2, atol=1e-10, err_msg=("window_length = %d, order = %d, pos = %s" % (window_length, order, pos))) def test_sg_coeffs_compare(): # Compare savgol_coeffs() to alt_sg_coeffs(). for window_length in range(1, 8, 2): for order in range(window_length): compare_coeffs_to_alt(window_length, order) def test_sg_coeffs_exact(): polyorder = 4 window_length = 9 halflen = window_length // 2 x = np.linspace(0, 21, 43) delta = x[1] - x[0] # The data is a cubic polynomial. We'll use an order 4 # SG filter, so the filtered values should equal the input data # (except within half window_length of the edges). y = 0.5 * x ** 3 - x h = savgol_coeffs(window_length, polyorder) y0 = convolve1d(y, h) assert_allclose(y0[halflen:-halflen], y[halflen:-halflen]) # Check the same input, but use deriv=1. dy is the exact result. dy = 1.5 * x ** 2 - 1 h = savgol_coeffs(window_length, polyorder, deriv=1, delta=delta) y1 = convolve1d(y, h) assert_allclose(y1[halflen:-halflen], dy[halflen:-halflen]) # Check the same input, but use deriv=2. d2y is the exact result. 
d2y = 3.0 * x h = savgol_coeffs(window_length, polyorder, deriv=2, delta=delta) y2 = convolve1d(y, h) assert_allclose(y2[halflen:-halflen], d2y[halflen:-halflen]) def test_sg_coeffs_deriv(): # The data in `x` is a sampled parabola, so using savgol_coeffs with an # order 2 or higher polynomial should give exact results. i = np.array([-2.0, 0.0, 2.0, 4.0, 6.0]) x = i ** 2 / 4 dx = i / 2 d2x = np.full_like(i, 0.5) for pos in range(x.size): coeffs0 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot') assert_allclose(coeffs0.dot(x), x[pos], atol=1e-10) coeffs1 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=1) assert_allclose(coeffs1.dot(x), dx[pos], atol=1e-10) coeffs2 = savgol_coeffs(5, 3, pos=pos, delta=2.0, use='dot', deriv=2) assert_allclose(coeffs2.dot(x), d2x[pos], atol=1e-10) def test_sg_coeffs_deriv_gt_polyorder(): """ If deriv > polyorder, the coefficients should be all 0. This is a regression test for a bug where, e.g., savgol_coeffs(5, polyorder=1, deriv=2) raised an error. """ coeffs = savgol_coeffs(5, polyorder=1, deriv=2) assert_array_equal(coeffs, np.zeros(5)) coeffs = savgol_coeffs(7, polyorder=4, deriv=6) assert_array_equal(coeffs, np.zeros(7)) def test_sg_coeffs_large(): # Test that for large values of window_length and polyorder the array of # coefficients returned is symmetric. The aim is to ensure that # no potential numeric overflow occurs. coeffs0 = savgol_coeffs(31, 9) assert_array_almost_equal(coeffs0, coeffs0[::-1]) coeffs1 = savgol_coeffs(31, 9, deriv=1) assert_array_almost_equal(coeffs1, -coeffs1[::-1]) # -------------------------------------------------------------------- # savgol_coeffs tests for even window length # -------------------------------------------------------------------- def test_sg_coeffs_even_window_length(): # Simple case - deriv=0, polyorder=0, 1 window_lengths = [4, 6, 8, 10, 12, 14, 16] for length in window_lengths: h_p_d = savgol_coeffs(length, 0, 0) assert_allclose(h_p_d, 1/length) # Verify with closed forms # deriv=1, polyorder=1, 2 def h_p_d_closed_form_1(k, m): return 6*(k - 0.5)/((2*m + 1)*m*(2*m - 1)) # deriv=2, polyorder=2 def h_p_d_closed_form_2(k, m): numer = 15*(-4*m**2 + 1 + 12*(k - 0.5)**2) denom = 4*(2*m + 1)*(m + 1)*m*(m - 1)*(2*m - 1) return numer/denom for length in window_lengths: m = length//2 expected_output = [h_p_d_closed_form_1(k, m) for k in range(-m + 1, m + 1)][::-1] actual_output = savgol_coeffs(length, 1, 1) assert_allclose(expected_output, actual_output) actual_output = savgol_coeffs(length, 2, 1) assert_allclose(expected_output, actual_output) expected_output = [h_p_d_closed_form_2(k, m) for k in range(-m + 1, m + 1)][::-1] actual_output = savgol_coeffs(length, 2, 2) assert_allclose(expected_output, actual_output) actual_output = savgol_coeffs(length, 3, 2) assert_allclose(expected_output, actual_output) #-------------------------------------------------------------------- # savgol_filter tests #-------------------------------------------------------------------- def test_sg_filter_trivial(): """ Test some trivial edge cases for savgol_filter().""" x = np.array([1.0]) y = savgol_filter(x, 1, 0) assert_equal(y, [1.0]) # Input is a single value. With a window length of 3 and polyorder 1, # the value in y is from the straight-line fit of (-1,0), (0,3) and # (1, 0) at 0. This is just the average of the three values, hence 1.0. 
x = np.array([3.0]) y = savgol_filter(x, 3, 1, mode='constant') assert_almost_equal(y, [1.0], decimal=15) x = np.array([3.0]) y = savgol_filter(x, 3, 1, mode='nearest') assert_almost_equal(y, [3.0], decimal=15) x = np.array([1.0] * 3) y = savgol_filter(x, 3, 1, mode='wrap') assert_almost_equal(y, [1.0, 1.0, 1.0], decimal=15) def test_sg_filter_basic(): # Some basic test cases for savgol_filter(). x = np.array([1.0, 2.0, 1.0]) y = savgol_filter(x, 3, 1, mode='constant') assert_allclose(y, [1.0, 4.0 / 3, 1.0]) y = savgol_filter(x, 3, 1, mode='mirror') assert_allclose(y, [5.0 / 3, 4.0 / 3, 5.0 / 3]) y = savgol_filter(x, 3, 1, mode='wrap') assert_allclose(y, [4.0 / 3, 4.0 / 3, 4.0 / 3]) def test_sg_filter_2d(): x = np.array([[1.0, 2.0, 1.0], [2.0, 4.0, 2.0]]) expected = np.array([[1.0, 4.0 / 3, 1.0], [2.0, 8.0 / 3, 2.0]]) y = savgol_filter(x, 3, 1, mode='constant') assert_allclose(y, expected) y = savgol_filter(x.T, 3, 1, mode='constant', axis=0) assert_allclose(y, expected.T) def test_sg_filter_interp_edges(): # Another test with low degree polynomial data, for which we can easily # give the exact results. In this test, we use mode='interp', so # savgol_filter should match the exact solution for the entire data set, # including the edges. t = np.linspace(-5, 5, 21) delta = t[1] - t[0] # Polynomial test data. x = np.array([t, 3 * t ** 2, t ** 3 - t]) dx = np.array([np.ones_like(t), 6 * t, 3 * t ** 2 - 1.0]) d2x = np.array([np.zeros_like(t), np.full_like(t, 6), 6 * t]) window_length = 7 y = savgol_filter(x, window_length, 3, axis=-1, mode='interp') assert_allclose(y, x, atol=1e-12) y1 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', deriv=1, delta=delta) assert_allclose(y1, dx, atol=1e-12) y2 = savgol_filter(x, window_length, 3, axis=-1, mode='interp', deriv=2, delta=delta) assert_allclose(y2, d2x, atol=1e-12) # Transpose everything, and test again with axis=0. x = x.T dx = dx.T d2x = d2x.T y = savgol_filter(x, window_length, 3, axis=0, mode='interp') assert_allclose(y, x, atol=1e-12) y1 = savgol_filter(x, window_length, 3, axis=0, mode='interp', deriv=1, delta=delta) assert_allclose(y1, dx, atol=1e-12) y2 = savgol_filter(x, window_length, 3, axis=0, mode='interp', deriv=2, delta=delta) assert_allclose(y2, d2x, atol=1e-12) def test_sg_filter_interp_edges_3d(): # Test mode='interp' with a 3-D array. 
t = np.linspace(-5, 5, 21) delta = t[1] - t[0] x1 = np.array([t, -t]) x2 = np.array([t ** 2, 3 * t ** 2 + 5]) x3 = np.array([t ** 3, 2 * t ** 3 + t ** 2 - 0.5 * t]) dx1 = np.array([np.ones_like(t), -np.ones_like(t)]) dx2 = np.array([2 * t, 6 * t]) dx3 = np.array([3 * t ** 2, 6 * t ** 2 + 2 * t - 0.5]) # z has shape (3, 2, 21) z = np.array([x1, x2, x3]) dz = np.array([dx1, dx2, dx3]) y = savgol_filter(z, 7, 3, axis=-1, mode='interp', delta=delta) assert_allclose(y, z, atol=1e-10) dy = savgol_filter(z, 7, 3, axis=-1, mode='interp', deriv=1, delta=delta) assert_allclose(dy, dz, atol=1e-10) # z has shape (3, 21, 2) z = np.array([x1.T, x2.T, x3.T]) dz = np.array([dx1.T, dx2.T, dx3.T]) y = savgol_filter(z, 7, 3, axis=1, mode='interp', delta=delta) assert_allclose(y, z, atol=1e-10) dy = savgol_filter(z, 7, 3, axis=1, mode='interp', deriv=1, delta=delta) assert_allclose(dy, dz, atol=1e-10) # z has shape (21, 3, 2) z = z.swapaxes(0, 1).copy() dz = dz.swapaxes(0, 1).copy() y = savgol_filter(z, 7, 3, axis=0, mode='interp', delta=delta) assert_allclose(y, z, atol=1e-10) dy = savgol_filter(z, 7, 3, axis=0, mode='interp', deriv=1, delta=delta) assert_allclose(dy, dz, atol=1e-10) def test_sg_filter_valid_window_length_3d(): """Tests that the window_length check is using the correct axis.""" x = np.ones((10, 20, 30)) savgol_filter(x, window_length=29, polyorder=3, mode='interp') with pytest.raises(ValueError, match='window_length must be less than'): # window_length is more than x.shape[-1]. savgol_filter(x, window_length=31, polyorder=3, mode='interp') savgol_filter(x, window_length=9, polyorder=3, axis=0, mode='interp') with pytest.raises(ValueError, match='window_length must be less than'): # window_length is more than x.shape[0]. savgol_filter(x, window_length=11, polyorder=3, axis=0, mode='interp')
file_length: 12,424
avg_line_length: 33.610028
max_line_length: 78
extension_type: py
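The savgol test file above builds its reference coefficients (`alt_sg_coeffs`) by fitting a polynomial to a unit impulse with `numpy.polyfit` and evaluating it with `numpy.polyval`, then compares them to `savgol_coeffs`. The following is a minimal standalone sketch of that cross-check, mirroring `compare_coeffs_to_alt`; the window, order, and position values are arbitrary illustrative choices, not taken from the dumped file.

import numpy as np
from scipy.signal import savgol_coeffs

# Arbitrary example parameters (any window with polyorder < window_length works).
window_length, polyorder, pos = 7, 3, 3

# Fit a polynomial to a unit impulse at `pos` and evaluate it over the window:
# the resulting samples are the Savitzky-Golay weights for that position.
t = np.arange(window_length)
unit = (t == pos).astype(float)
h_alt = np.polyval(np.polyfit(t, unit, polyorder), t)

# savgol_coeffs with use='dot' returns the weights in the same orientation.
h_ref = savgol_coeffs(window_length, polyorder, pos=pos, use='dot')
assert np.allclose(h_alt, h_ref, atol=1e-10)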
repo: scipy
file: scipy-main/scipy/signal/tests/mpsig.py
""" Some signal functions implemented using mpmath. """ try: import mpmath except ImportError: mpmath = None def _prod(seq): """Returns the product of the elements in the sequence `seq`.""" p = 1 for elem in seq: p *= elem return p def _relative_degree(z, p): """ Return relative degree of transfer function from zeros and poles. This is simply len(p) - len(z), which must be nonnegative. A ValueError is raised if len(p) < len(z). """ degree = len(p) - len(z) if degree < 0: raise ValueError("Improper transfer function. " "Must have at least as many poles as zeros.") return degree def _zpkbilinear(z, p, k, fs): """Bilinear transformation to convert a filter from analog to digital.""" degree = _relative_degree(z, p) fs2 = 2*fs # Bilinear transform the poles and zeros z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z] p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p] # Any zeros that were at infinity get moved to the Nyquist frequency z_z.extend([-1] * degree) # Compensate for gain change numer = _prod(fs2 - z1 for z1 in z) denom = _prod(fs2 - p1 for p1 in p) k_z = k * numer / denom return z_z, p_z, k_z.real def _zpklp2lp(z, p, k, wo=1): """Transform a lowpass filter to a different cutoff frequency.""" degree = _relative_degree(z, p) # Scale all points radially from origin to shift cutoff frequency z_lp = [wo * z1 for z1 in z] p_lp = [wo * p1 for p1 in p] # Each shifted pole decreases gain by wo, each shifted zero increases it. # Cancel out the net change to keep overall gain the same k_lp = k * wo**degree return z_lp, p_lp, k_lp def _butter_analog_poles(n): """ Poles of an analog Butterworth lowpass filter. This is the same calculation as scipy.signal.buttap(n) or scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used, and only the poles are returned. """ poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)] return poles def butter_lp(n, Wn): """ Lowpass Butterworth digital filter design. This computes the same result as scipy.signal.butter(n, Wn, output='zpk'), but it uses mpmath, and the results are returned in lists instead of NumPy arrays. """ zeros = [] poles = _butter_analog_poles(n) k = 1 fs = 2 warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs) z, p, k = _zpklp2lp(zeros, poles, k, wo=warped) z, p, k = _zpkbilinear(z, p, k, fs=fs) return z, p, k def zpkfreqz(z, p, k, worN=None): """ Frequency response of a filter in zpk format, using mpmath. This is the same calculation as scipy.signal.freqz, but the input is in zpk format, the calculation is performed using mpath, and the results are returned in lists instead of NumPy arrays. """ if worN is None or isinstance(worN, int): N = worN or 512 ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)] else: ws = worN h = [] for wk in ws: zm1 = mpmath.exp(1j * wk) numer = _prod([zm1 - t for t in z]) denom = _prod([zm1 - t for t in p]) hk = k * numer / denom h.append(hk) return ws, h
file_length: 3,308
avg_line_length: 25.902439
max_line_length: 78
extension_type: py
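mpsig.py above documents `butter_lp(n, Wn)` as an mpmath re-implementation of `scipy.signal.butter(n, Wn, output='zpk')`. Below is a rough usage sketch of that equivalence. It assumes mpmath is installed and that the dumped module is importable as `mpsig`; both are assumptions about the environment, not guaranteed by the dump, and the order and cutoff are arbitrary.

import numpy as np
from scipy.signal import butter

import mpsig  # the module dumped above; assumed to be on the import path

n, Wn = 4, 0.25  # arbitrary filter order and normalized cutoff
z_mp, p_mp, k_mp = mpsig.butter_lp(n, Wn)         # mpmath lists
z_sp, p_sp, k_sp = butter(n, Wn, output='zpk')    # NumPy arrays

# Compare poles (sorted) and gain; the zeros are all at z = -1 in both cases.
p_mp = np.sort_complex(np.array([complex(p) for p in p_mp]))
assert np.allclose(p_mp, np.sort_complex(p_sp))
assert np.isclose(float(k_mp), k_sp)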
repo: scipy
file: scipy-main/scipy/signal/tests/test_cont2discrete.py
import numpy as np from numpy.testing import \ assert_array_almost_equal, assert_almost_equal, \ assert_allclose, assert_equal import pytest from scipy.signal import cont2discrete as c2d from scipy.signal import dlsim, ss2tf, ss2zpk, lsim, lti from scipy.signal import tf2ss, impulse, dimpulse, step, dstep # Author: Jeffrey Armstrong <jeff@approximatrix.com> # March 29, 2011 class TestC2D: def test_zoh(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [-0.33]]) ad_truth = 1.648721270700128 * np.eye(2) bd_truth = np.full((2, 1), 0.324360635350064) # c and d in discrete should be equal to their continuous counterparts dt_requested = 0.5 ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='zoh') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cc, cd) assert_array_almost_equal(dc, dd) assert_almost_equal(dt_requested, dt) def test_foh(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [-0.33]]) # True values are verified with Matlab ad_truth = 1.648721270700128 * np.eye(2) bd_truth = np.full((2, 1), 0.420839287058789) cd_truth = cc dd_truth = np.array([[0.260262223725224], [0.297442541400256], [-0.144098411624840]]) dt_requested = 0.5 ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='foh') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) assert_almost_equal(dt_requested, dt) def test_impulse(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [0.0]]) # True values are verified with Matlab ad_truth = 1.648721270700128 * np.eye(2) bd_truth = np.full((2, 1), 0.412180317675032) cd_truth = cc dd_truth = np.array([[0.4375], [0.5], [0.3125]]) dt_requested = 0.5 ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='impulse') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) assert_almost_equal(dt_requested, dt) def test_gbt(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [-0.33]]) dt_requested = 0.5 alpha = 1.0 / 3.0 ad_truth = 1.6 * np.eye(2) bd_truth = np.full((2, 1), 0.3) cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) dd_truth = np.array([[0.175], [0.2], [-0.205]]) ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='gbt', alpha=alpha) assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) def test_euler(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [-0.33]]) dt_requested = 0.5 ad_truth = 1.5 * np.eye(2) bd_truth = np.full((2, 1), 0.25) cd_truth = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dd_truth = dc ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='euler') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) assert_almost_equal(dt_requested, dt) def test_backward_diff(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = 
np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [-0.33]]) dt_requested = 0.5 ad_truth = 2.0 * np.eye(2) bd_truth = np.full((2, 1), 0.5) cd_truth = np.array([[1.5, 2.0], [2.0, 2.0], [2.0, 0.5]]) dd_truth = np.array([[0.875], [1.0], [0.295]]) ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='backward_diff') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) def test_bilinear(self): ac = np.eye(2) bc = np.full((2, 1), 0.5) cc = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]]) dc = np.array([[0.0], [0.0], [-0.33]]) dt_requested = 0.5 ad_truth = (5.0 / 3.0) * np.eye(2) bd_truth = np.full((2, 1), 1.0 / 3.0) cd_truth = np.array([[1.0, 4.0 / 3.0], [4.0 / 3.0, 4.0 / 3.0], [4.0 / 3.0, 1.0 / 3.0]]) dd_truth = np.array([[0.291666666666667], [1.0 / 3.0], [-0.121666666666667]]) ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='bilinear') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) assert_almost_equal(dt_requested, dt) # Same continuous system again, but change sampling rate ad_truth = 1.4 * np.eye(2) bd_truth = np.full((2, 1), 0.2) cd_truth = np.array([[0.9, 1.2], [1.2, 1.2], [1.2, 0.3]]) dd_truth = np.array([[0.175], [0.2], [-0.205]]) dt_requested = 1.0 / 3.0 ad, bd, cd, dd, dt = c2d((ac, bc, cc, dc), dt_requested, method='bilinear') assert_array_almost_equal(ad_truth, ad) assert_array_almost_equal(bd_truth, bd) assert_array_almost_equal(cd_truth, cd) assert_array_almost_equal(dd_truth, dd) assert_almost_equal(dt_requested, dt) def test_transferfunction(self): numc = np.array([0.25, 0.25, 0.5]) denc = np.array([0.75, 0.75, 1.0]) numd = np.array([[1.0 / 3.0, -0.427419169438754, 0.221654141101125]]) dend = np.array([1.0, -1.351394049721225, 0.606530659712634]) dt_requested = 0.5 num, den, dt = c2d((numc, denc), dt_requested, method='zoh') assert_array_almost_equal(numd, num) assert_array_almost_equal(dend, den) assert_almost_equal(dt_requested, dt) def test_zerospolesgain(self): zeros_c = np.array([0.5, -0.5]) poles_c = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) k_c = 1.0 zeros_d = [1.23371727305860, 0.735356894461267] polls_d = [0.938148335039729 + 0.346233593780536j, 0.938148335039729 - 0.346233593780536j] k_d = 1.0 dt_requested = 0.5 zeros, poles, k, dt = c2d((zeros_c, poles_c, k_c), dt_requested, method='zoh') assert_array_almost_equal(zeros_d, zeros) assert_array_almost_equal(polls_d, poles) assert_almost_equal(k_d, k) assert_almost_equal(dt_requested, dt) def test_gbt_with_sio_tf_and_zpk(self): """Test method='gbt' with alpha=0.25 for tf and zpk cases.""" # State space coefficients for the continuous SIO system. A = -1.0 B = 1.0 C = 1.0 D = 0.5 # The continuous transfer function coefficients. cnum, cden = ss2tf(A, B, C, D) # Continuous zpk representation cz, cp, ck = ss2zpk(A, B, C, D) h = 1.0 alpha = 0.25 # Explicit formulas, in the scalar case. Ad = (1 + (1 - alpha) * h * A) / (1 - alpha * h * A) Bd = h * B / (1 - alpha * h * A) Cd = C / (1 - alpha * h * A) Dd = D + alpha * C * Bd # Convert the explicit solution to tf dnum, dden = ss2tf(Ad, Bd, Cd, Dd) # Compute the discrete tf using cont2discrete. c2dnum, c2dden, dt = c2d((cnum, cden), h, method='gbt', alpha=alpha) assert_allclose(dnum, c2dnum) assert_allclose(dden, c2dden) # Convert explicit solution to zpk. 
dz, dp, dk = ss2zpk(Ad, Bd, Cd, Dd) # Compute the discrete zpk using cont2discrete. c2dz, c2dp, c2dk, dt = c2d((cz, cp, ck), h, method='gbt', alpha=alpha) assert_allclose(dz, c2dz) assert_allclose(dp, c2dp) assert_allclose(dk, c2dk) def test_discrete_approx(self): """ Test that the solution to the discrete approximation of a continuous system actually approximates the solution to the continuous system. This is an indirect test of the correctness of the implementation of cont2discrete. """ def u(t): return np.sin(2.5 * t) a = np.array([[-0.01]]) b = np.array([[1.0]]) c = np.array([[1.0]]) d = np.array([[0.2]]) x0 = 1.0 t = np.linspace(0, 10.0, 101) dt = t[1] - t[0] u1 = u(t) # Use lsim to compute the solution to the continuous system. t, yout, xout = lsim((a, b, c, d), T=t, U=u1, X0=x0) # Convert the continuous system to a discrete approximation. dsys = c2d((a, b, c, d), dt, method='bilinear') # Use dlsim with the pairwise averaged input to compute the output # of the discrete system. u2 = 0.5 * (u1[:-1] + u1[1:]) t2 = t[:-1] td2, yd2, xd2 = dlsim(dsys, u=u2.reshape(-1, 1), t=t2, x0=x0) # ymid is the average of consecutive terms of the "exact" output # computed by lsim2. This is what the discrete approximation # actually approximates. ymid = 0.5 * (yout[:-1] + yout[1:]) assert_allclose(yd2.ravel(), ymid, rtol=1e-4) def test_simo_tf(self): # See gh-5753 tf = ([[1, 0], [1, 1]], [1, 1]) num, den, dt = c2d(tf, 0.01) assert_equal(dt, 0.01) # sanity check assert_allclose(den, [1, -0.990404983], rtol=1e-3) assert_allclose(num, [[1, -1], [1, -0.99004983]], rtol=1e-3) def test_multioutput(self): ts = 0.01 # time step tf = ([[1, -3], [1, 5]], [1, 1]) num, den, dt = c2d(tf, ts) tf1 = (tf[0][0], tf[1]) num1, den1, dt1 = c2d(tf1, ts) tf2 = (tf[0][1], tf[1]) num2, den2, dt2 = c2d(tf2, ts) # Sanity checks assert_equal(dt, dt1) assert_equal(dt, dt2) # Check that we get the same results assert_allclose(num, np.vstack((num1, num2)), rtol=1e-13) # Single input, so the denominator should # not be multidimensional like the numerator assert_allclose(den, den1, rtol=1e-13) assert_allclose(den, den2, rtol=1e-13) class TestC2dLti: def test_c2d_ss(self): # StateSpace A = np.array([[-0.3, 0.1], [0.2, -0.7]]) B = np.array([[0], [1]]) C = np.array([[1, 0]]) D = 0 A_res = np.array([[0.985136404135682, 0.004876671474795], [0.009753342949590, 0.965629718236502]]) B_res = np.array([[0.000122937599964], [0.049135527547844]]) sys_ssc = lti(A, B, C, D) sys_ssd = sys_ssc.to_discrete(0.05) assert_allclose(sys_ssd.A, A_res) assert_allclose(sys_ssd.B, B_res) assert_allclose(sys_ssd.C, C) assert_allclose(sys_ssd.D, D) def test_c2d_tf(self): sys = lti([0.5, 0.3], [1.0, 0.4]) sys = sys.to_discrete(0.005) # Matlab results num_res = np.array([0.5, -0.485149004980066]) den_res = np.array([1.0, -0.980198673306755]) # Somehow a lot of numerical errors assert_allclose(sys.den, den_res, atol=0.02) assert_allclose(sys.num, num_res, atol=0.02) class TestC2dInvariants: # Some test cases for checking the invariances. 
# Array of triplets: (system, sample time, number of samples) cases = [ (tf2ss([1, 1], [1, 1.5, 1]), 0.25, 10), (tf2ss([1, 2], [1, 1.5, 3, 1]), 0.5, 10), (tf2ss(0.1, [1, 1, 2, 1]), 0.5, 10), ] # Check that systems discretized with the impulse-invariant # method really hold the invariant @pytest.mark.parametrize("sys,sample_time,samples_number", cases) def test_impulse_invariant(self, sys, sample_time, samples_number): time = np.arange(samples_number) * sample_time _, yout_cont = impulse(sys, T=time) _, yout_disc = dimpulse(c2d(sys, sample_time, method='impulse'), n=len(time)) assert_allclose(sample_time * yout_cont.ravel(), yout_disc[0].ravel()) # Step invariant should hold for ZOH discretized systems @pytest.mark.parametrize("sys,sample_time,samples_number", cases) def test_step_invariant(self, sys, sample_time, samples_number): time = np.arange(samples_number) * sample_time _, yout_cont = step(sys, T=time) _, yout_disc = dstep(c2d(sys, sample_time, method='zoh'), n=len(time)) assert_allclose(yout_cont.ravel(), yout_disc[0].ravel()) # Linear invariant should hold for FOH discretized systems @pytest.mark.parametrize("sys,sample_time,samples_number", cases) def test_linear_invariant(self, sys, sample_time, samples_number): time = np.arange(samples_number) * sample_time _, yout_cont, _ = lsim(sys, T=time, U=time) _, yout_disc, _ = dlsim(c2d(sys, sample_time, method='foh'), u=time) assert_allclose(yout_cont.ravel(), yout_disc.ravel())
file_length: 14,605
avg_line_length: 34.026379
max_line_length: 78
extension_type: py
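TestC2D.test_zoh above hard-codes ad_truth = exp(0.5) * I and bd_truth = 0.3243...; those numbers follow from the standard zero-order-hold formulas Ad = expm(A*dt) and Bd = inv(A) @ (Ad - I) @ B, which apply when A is invertible (as it is in that test, A = I). The sketch below re-derives the truth values and checks them against cont2discrete; it is an illustrative recomputation, not part of the dumped file.

import numpy as np
from scipy.linalg import expm
from scipy.signal import cont2discrete

# Continuous-time system from TestC2D.test_zoh.
A = np.eye(2)
B = np.full((2, 1), 0.5)
C = np.array([[0.75, 1.0], [1.0, 1.0], [1.0, 0.25]])
D = np.array([[0.0], [0.0], [-0.33]])
dt = 0.5

# Zero-order-hold discretization formulas (A is invertible here).
Ad_expected = expm(A * dt)                                     # = exp(0.5) * I
Bd_expected = np.linalg.solve(A, (Ad_expected - np.eye(2)) @ B)

Ad, Bd, Cd, Dd, _ = cont2discrete((A, B, C, D), dt, method='zoh')
assert np.allclose(Ad, Ad_expected)
assert np.allclose(Bd, Bd_expected)
assert np.allclose(Cd, C) and np.allclose(Dd, D)   # C and D are unchanged by zoh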
repo: scipy
file: scipy-main/scipy/signal/tests/test_fir_filter_design.py
import numpy as np from numpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_equal, assert_, assert_allclose, assert_warns) from pytest import raises as assert_raises import pytest from scipy.fft import fft from scipy.special import sinc from scipy.signal import kaiser_beta, kaiser_atten, kaiserord, \ firwin, firwin2, freqz, remez, firls, minimum_phase def test_kaiser_beta(): b = kaiser_beta(58.7) assert_almost_equal(b, 0.1102 * 50.0) b = kaiser_beta(22.0) assert_almost_equal(b, 0.5842 + 0.07886) b = kaiser_beta(21.0) assert_equal(b, 0.0) b = kaiser_beta(10.0) assert_equal(b, 0.0) def test_kaiser_atten(): a = kaiser_atten(1, 1.0) assert_equal(a, 7.95) a = kaiser_atten(2, 1/np.pi) assert_equal(a, 2.285 + 7.95) def test_kaiserord(): assert_raises(ValueError, kaiserord, 1.0, 1.0) numtaps, beta = kaiserord(2.285 + 7.95 - 0.001, 1/np.pi) assert_equal((numtaps, beta), (2, 0.0)) class TestFirwin: def check_response(self, h, expected_response, tol=.05): N = len(h) alpha = 0.5 * (N-1) m = np.arange(0,N) - alpha # time indices of taps for freq, expected in expected_response: actual = abs(np.sum(h*np.exp(-1.j*np.pi*m*freq))) mse = abs(actual-expected)**2 assert_(mse < tol, 'response not as expected, mse=%g > %g' % (mse, tol)) def test_response(self): N = 51 f = .5 # increase length just to try even/odd h = firwin(N, f) # low-pass from 0 to f self.check_response(h, [(.25,1), (.75,0)]) h = firwin(N+1, f, window='nuttall') # specific window self.check_response(h, [(.25,1), (.75,0)]) h = firwin(N+2, f, pass_zero=False) # stop from 0 to f --> high-pass self.check_response(h, [(.25,0), (.75,1)]) f1, f2, f3, f4 = .2, .4, .6, .8 h = firwin(N+3, [f1, f2], pass_zero=False) # band-pass filter self.check_response(h, [(.1,0), (.3,1), (.5,0)]) h = firwin(N+4, [f1, f2]) # band-stop filter self.check_response(h, [(.1,1), (.3,0), (.5,1)]) h = firwin(N+5, [f1, f2, f3, f4], pass_zero=False, scale=False) self.check_response(h, [(.1,0), (.3,1), (.5,0), (.7,1), (.9,0)]) h = firwin(N+6, [f1, f2, f3, f4]) # multiband filter self.check_response(h, [(.1,1), (.3,0), (.5,1), (.7,0), (.9,1)]) h = firwin(N+7, 0.1, width=.03) # low-pass self.check_response(h, [(.05,1), (.75,0)]) h = firwin(N+8, 0.1, pass_zero=False) # high-pass self.check_response(h, [(.05,0), (.75,1)]) def mse(self, h, bands): """Compute mean squared error versus ideal response across frequency band. 
h -- coefficients bands -- list of (left, right) tuples relative to 1==Nyquist of passbands """ w, H = freqz(h, worN=1024) f = w/np.pi passIndicator = np.zeros(len(w), bool) for left, right in bands: passIndicator |= (f >= left) & (f < right) Hideal = np.where(passIndicator, 1, 0) mse = np.mean(abs(abs(H)-Hideal)**2) return mse def test_scaling(self): """ For one lowpass, bandpass, and highpass example filter, this test checks two things: - the mean squared error over the frequency domain of the unscaled filter is smaller than the scaled filter (true for rectangular window) - the response of the scaled filter is exactly unity at the center of the first passband """ N = 11 cases = [ ([.5], True, (0, 1)), ([0.2, .6], False, (.4, 1)), ([.5], False, (1, 1)), ] for cutoff, pass_zero, expected_response in cases: h = firwin(N, cutoff, scale=False, pass_zero=pass_zero, window='ones') hs = firwin(N, cutoff, scale=True, pass_zero=pass_zero, window='ones') if len(cutoff) == 1: if pass_zero: cutoff = [0] + cutoff else: cutoff = cutoff + [1] assert_(self.mse(h, [cutoff]) < self.mse(hs, [cutoff]), 'least squares violation') self.check_response(hs, [expected_response], 1e-12) class TestFirWinMore: """Different author, different style, different tests...""" def test_lowpass(self): width = 0.04 ntaps, beta = kaiserord(120, width) kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False) taps = firwin(ntaps, **kwargs) # Check the symmetry of taps. assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) taps_str = firwin(ntaps, pass_zero='lowpass', **kwargs) assert_allclose(taps, taps_str) def test_highpass(self): width = 0.04 ntaps, beta = kaiserord(120, width) # Ensure that ntaps is odd. ntaps |= 1 kwargs = dict(cutoff=0.5, window=('kaiser', beta), scale=False) taps = firwin(ntaps, pass_zero=False, **kwargs) # Check the symmetry of taps. assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) taps_str = firwin(ntaps, pass_zero='highpass', **kwargs) assert_allclose(taps, taps_str) def test_bandpass(self): width = 0.04 ntaps, beta = kaiserord(120, width) kwargs = dict(cutoff=[0.3, 0.7], window=('kaiser', beta), scale=False) taps = firwin(ntaps, pass_zero=False, **kwargs) # Check the symmetry of taps. assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. 
freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5, 0.7-width/2, 0.7+width/2, 0.8, 1.0]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) taps_str = firwin(ntaps, pass_zero='bandpass', **kwargs) assert_allclose(taps, taps_str) def test_bandstop_multi(self): width = 0.04 ntaps, beta = kaiserord(120, width) kwargs = dict(cutoff=[0.2, 0.5, 0.8], window=('kaiser', beta), scale=False) taps = firwin(ntaps, **kwargs) # Check the symmetry of taps. assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35, 0.5-width/2, 0.5+width/2, 0.65, 0.8-width/2, 0.8+width/2, 0.9, 1.0]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) taps_str = firwin(ntaps, pass_zero='bandstop', **kwargs) assert_allclose(taps, taps_str) def test_fs_nyq(self): """Test the fs and nyq keywords.""" nyquist = 1000 width = 40.0 relative_width = width/nyquist ntaps, beta = kaiserord(120, relative_width) taps = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta), pass_zero=False, scale=False, fs=2*nyquist) # Check the symmetry of taps. assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500, 700-width/2, 700+width/2, 800, 1000]) freqs, response = freqz(taps, worN=np.pi*freq_samples/nyquist) assert_array_almost_equal(np.abs(response), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], decimal=5) with np.testing.suppress_warnings() as sup: sup.filter(DeprecationWarning, "Keyword argument 'nyq'") taps2 = firwin(ntaps, cutoff=[300, 700], window=('kaiser', beta), pass_zero=False, scale=False, nyq=nyquist) assert_allclose(taps2, taps) def test_bad_cutoff(self): """Test that invalid cutoff argument raises ValueError.""" # cutoff values must be greater than 0 and less than 1. assert_raises(ValueError, firwin, 99, -0.5) assert_raises(ValueError, firwin, 99, 1.5) # Don't allow 0 or 1 in cutoff. assert_raises(ValueError, firwin, 99, [0, 0.5]) assert_raises(ValueError, firwin, 99, [0.5, 1]) # cutoff values must be strictly increasing. assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.2]) assert_raises(ValueError, firwin, 99, [0.1, 0.5, 0.5]) # Must have at least one cutoff value. assert_raises(ValueError, firwin, 99, []) # 2D array not allowed. assert_raises(ValueError, firwin, 99, [[0.1, 0.2],[0.3, 0.4]]) # cutoff values must be less than nyq. 
with np.testing.suppress_warnings() as sup: sup.filter(DeprecationWarning, "Keyword argument 'nyq'") assert_raises(ValueError, firwin, 99, 50.0, nyq=40) assert_raises(ValueError, firwin, 99, [10, 20, 30], nyq=25) assert_raises(ValueError, firwin, 99, 50.0, fs=80) assert_raises(ValueError, firwin, 99, [10, 20, 30], fs=50) def test_even_highpass_raises_value_error(self): """Test that attempt to create a highpass filter with an even number of taps raises a ValueError exception.""" assert_raises(ValueError, firwin, 40, 0.5, pass_zero=False) assert_raises(ValueError, firwin, 40, [.25, 0.5]) def test_bad_pass_zero(self): """Test degenerate pass_zero cases.""" with assert_raises(ValueError, match='pass_zero must be'): firwin(41, 0.5, pass_zero='foo') with assert_raises(TypeError, match='cannot be interpreted'): firwin(41, 0.5, pass_zero=1.) for pass_zero in ('lowpass', 'highpass'): with assert_raises(ValueError, match='cutoff must have one'): firwin(41, [0.5, 0.6], pass_zero=pass_zero) for pass_zero in ('bandpass', 'bandstop'): with assert_raises(ValueError, match='must have at least two'): firwin(41, [0.5], pass_zero=pass_zero) def test_nyq_deprecation(self): with pytest.warns(DeprecationWarning, match="Keyword argument 'nyq' is deprecated in " ): firwin(1, 1, nyq=10) class TestFirwin2: def test_invalid_args(self): # `freq` and `gain` have different lengths. with assert_raises(ValueError, match='must be of same length'): firwin2(50, [0, 0.5, 1], [0.0, 1.0]) # `nfreqs` is less than `ntaps`. with assert_raises(ValueError, match='ntaps must be less than nfreqs'): firwin2(50, [0, 0.5, 1], [0.0, 1.0, 1.0], nfreqs=33) # Decreasing value in `freq` with assert_raises(ValueError, match='must be nondecreasing'): firwin2(50, [0, 0.5, 0.4, 1.0], [0, .25, .5, 1.0]) # Value in `freq` repeated more than once. with assert_raises(ValueError, match='must not occur more than twice'): firwin2(50, [0, .1, .1, .1, 1.0], [0.0, 0.5, 0.75, 1.0, 1.0]) # `freq` does not start at 0.0. with assert_raises(ValueError, match='start with 0'): firwin2(50, [0.5, 1.0], [0.0, 1.0]) # `freq` does not end at fs/2. with assert_raises(ValueError, match='end with fs/2'): firwin2(50, [0.0, 0.5], [0.0, 1.0]) # Value 0 is repeated in `freq` with assert_raises(ValueError, match='0 must not be repeated'): firwin2(50, [0.0, 0.0, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0]) # Value fs/2 is repeated in `freq` with assert_raises(ValueError, match='fs/2 must not be repeated'): firwin2(50, [0.0, 0.5, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]) # Value in `freq` that is too close to a repeated number with assert_raises(ValueError, match='cannot contain numbers ' 'that are too close'): firwin2(50, [0.0, 0.5 - np.finfo(float).eps * 0.5, 0.5, 0.5, 1.0], [1.0, 1.0, 1.0, 0.0, 0.0]) # Type II filter, but the gain at nyquist frequency is not zero. with assert_raises(ValueError, match='Type II filter'): firwin2(16, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0]) # Type III filter, but the gains at nyquist and zero rate are not zero. with assert_raises(ValueError, match='Type III filter'): firwin2(17, [0.0, 0.5, 1.0], [0.0, 1.0, 1.0], antisymmetric=True) with assert_raises(ValueError, match='Type III filter'): firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True) with assert_raises(ValueError, match='Type III filter'): firwin2(17, [0.0, 0.5, 1.0], [1.0, 1.0, 1.0], antisymmetric=True) # Type IV filter, but the gain at zero rate is not zero. 
with assert_raises(ValueError, match='Type IV filter'): firwin2(16, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0], antisymmetric=True) def test01(self): width = 0.04 beta = 12.0 ntaps = 400 # Filter is 1 from w=0 to w=0.5, then decreases linearly from 1 to 0 as w # increases from w=0.5 to w=1 (w=1 is the Nyquist frequency). freq = [0.0, 0.5, 1.0] gain = [1.0, 1.0, 0.0] taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0-width/2]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [1.0, 1.0, 1.0, 1.0-width, 0.5, width], decimal=5) def test02(self): width = 0.04 beta = 12.0 # ntaps must be odd for positive gain at Nyquist. ntaps = 401 # An ideal highpass filter. freq = [0.0, 0.5, 0.5, 1.0] gain = [0.0, 0.0, 1.0, 1.0] taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) freq_samples = np.array([0.0, 0.25, 0.5-width, 0.5+width, 0.75, 1.0]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) def test03(self): width = 0.02 ntaps, beta = kaiserord(120, width) # ntaps must be odd for positive gain at Nyquist. ntaps = int(ntaps) | 1 freq = [0.0, 0.4, 0.4, 0.5, 0.5, 1.0] gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0] taps = firwin2(ntaps, freq, gain, window=('kaiser', beta)) freq_samples = np.array([0.0, 0.4-width, 0.4+width, 0.45, 0.5-width, 0.5+width, 0.75, 1.0]) freqs, response = freqz(taps, worN=np.pi*freq_samples) assert_array_almost_equal(np.abs(response), [1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], decimal=5) def test04(self): """Test firwin2 when window=None.""" ntaps = 5 # Ideal lowpass: gain is 1 on [0,0.5], and 0 on [0.5, 1.0] freq = [0.0, 0.5, 0.5, 1.0] gain = [1.0, 1.0, 0.0, 0.0] taps = firwin2(ntaps, freq, gain, window=None, nfreqs=8193) alpha = 0.5 * (ntaps - 1) m = np.arange(0, ntaps) - alpha h = 0.5 * sinc(0.5 * m) assert_array_almost_equal(h, taps) def test05(self): """Test firwin2 for calculating Type IV filters""" ntaps = 1500 freq = [0.0, 1.0] gain = [0.0, 1.0] taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True) assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2:][::-1]) freqs, response = freqz(taps, worN=2048) assert_array_almost_equal(abs(response), freqs / np.pi, decimal=4) def test06(self): """Test firwin2 for calculating Type III filters""" ntaps = 1501 freq = [0.0, 0.5, 0.55, 1.0] gain = [0.0, 0.5, 0.0, 0.0] taps = firwin2(ntaps, freq, gain, window=None, antisymmetric=True) assert_equal(taps[ntaps // 2], 0.0) assert_array_almost_equal(taps[: ntaps // 2], -taps[ntaps // 2 + 1:][::-1]) freqs, response1 = freqz(taps, worN=2048) response2 = np.interp(freqs / np.pi, freq, gain) assert_array_almost_equal(abs(response1), response2, decimal=3) def test_fs_nyq(self): taps1 = firwin2(80, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], fs=120.0) assert_array_almost_equal(taps1, taps2) with np.testing.suppress_warnings() as sup: sup.filter(DeprecationWarning, "Keyword argument 'nyq'") taps2 = firwin2(80, [0.0, 30.0, 60.0], [1.0, 1.0, 0.0], nyq=60.0) assert_array_almost_equal(taps1, taps2) def test_tuple(self): taps1 = firwin2(150, (0.0, 0.5, 0.5, 1.0), (1.0, 1.0, 0.0, 0.0)) taps2 = firwin2(150, [0.0, 0.5, 0.5, 1.0], [1.0, 1.0, 0.0, 0.0]) assert_array_almost_equal(taps1, taps2) def test_input_modyfication(self): freq1 = np.array([0.0, 0.5, 0.5, 1.0]) freq2 = np.array(freq1) firwin2(80, freq1, [1.0, 1.0, 0.0, 0.0]) 
assert_equal(freq1, freq2) def test_nyq_deprecation(self): with pytest.warns(DeprecationWarning, match="Keyword argument 'nyq' is deprecated in " ): firwin2(1, [0, 10], [1, 1], nyq=10) class TestRemez: def test_bad_args(self): assert_raises(ValueError, remez, 11, [0.1, 0.4], [1], type='pooka') def test_hilbert(self): N = 11 # number of taps in the filter a = 0.1 # width of the transition band # design an unity gain hilbert bandpass filter from w to 0.5-w h = remez(11, [a, 0.5-a], [1], type='hilbert') # make sure the filter has correct # of taps assert_(len(h) == N, "Number of Taps") # make sure it is type III (anti-symmetric tap coefficients) assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1]) # Since the requested response is symmetric, all even coefficients # should be zero (or in this case really small) assert_((abs(h[1::2]) < 1e-15).all(), "Even Coefficients Equal Zero") # now check the frequency response w, H = freqz(h, 1) f = w/2/np.pi Hmag = abs(H) # should have a zero at 0 and pi (in this case close to zero) assert_((Hmag[[0, -1]] < 0.02).all(), "Zero at zero and pi") # check that the pass band is close to unity idx = np.logical_and(f > a, f < 0.5-a) assert_((abs(Hmag[idx] - 1) < 0.015).all(), "Pass Band Close To Unity") def test_compare(self): # test comparison to MATLAB k = [0.024590270518440, -0.041314581814658, -0.075943803756711, -0.003530911231040, 0.193140296954975, 0.373400753484939, 0.373400753484939, 0.193140296954975, -0.003530911231040, -0.075943803756711, -0.041314581814658, 0.024590270518440] with np.testing.suppress_warnings() as sup: sup.filter(DeprecationWarning, "'remez'") h = remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.) assert_allclose(h, k) h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.) assert_allclose(h, k) h = [-0.038976016082299, 0.018704846485491, -0.014644062687875, 0.002879152556419, 0.016849978528150, -0.043276706138248, 0.073641298245579, -0.103908158578635, 0.129770906801075, -0.147163447297124, 0.153302248456347, -0.147163447297124, 0.129770906801075, -0.103908158578635, 0.073641298245579, -0.043276706138248, 0.016849978528150, 0.002879152556419, -0.014644062687875, 0.018704846485491, -0.038976016082299] with np.testing.suppress_warnings() as sup: sup.filter(DeprecationWarning, "'remez'") assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], Hz=2.), h) assert_allclose(remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.), h) def test_Hz_deprecation(self): with pytest.warns(DeprecationWarning, match="'remez' keyword argument 'Hz'" ): remez(12, [0, 0.3, 0.5, 1], [1, 0], Hz=2.) 
class TestFirls: def test_bad_args(self): # even numtaps assert_raises(ValueError, firls, 10, [0.1, 0.2], [0, 0]) # odd bands assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.4], [0, 0, 0]) # len(bands) != len(desired) assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.4], [0, 0, 0]) # non-monotonic bands assert_raises(ValueError, firls, 11, [0.2, 0.1], [0, 0]) assert_raises(ValueError, firls, 11, [0.1, 0.2, 0.3, 0.3], [0] * 4) assert_raises(ValueError, firls, 11, [0.3, 0.4, 0.1, 0.2], [0] * 4) assert_raises(ValueError, firls, 11, [0.1, 0.3, 0.2, 0.4], [0] * 4) # negative desired assert_raises(ValueError, firls, 11, [0.1, 0.2], [-1, 1]) # len(weight) != len(pairs) assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [1, 2]) # negative weight assert_raises(ValueError, firls, 11, [0.1, 0.2], [0, 0], [-1]) def test_firls(self): N = 11 # number of taps in the filter a = 0.1 # width of the transition band # design a halfband symmetric low-pass filter h = firls(11, [0, a, 0.5-a, 0.5], [1, 1, 0, 0], fs=1.0) # make sure the filter has correct # of taps assert_equal(len(h), N) # make sure it is symmetric midx = (N-1) // 2 assert_array_almost_equal(h[:midx], h[:-midx-1:-1]) # make sure the center tap is 0.5 assert_almost_equal(h[midx], 0.5) # For halfband symmetric, odd coefficients (except the center) # should be zero (really small) hodd = np.hstack((h[1:midx:2], h[-midx+1::2])) assert_array_almost_equal(hodd, 0) # now check the frequency response w, H = freqz(h, 1) f = w/2/np.pi Hmag = np.abs(H) # check that the pass band is close to unity idx = np.logical_and(f > 0, f < a) assert_array_almost_equal(Hmag[idx], 1, decimal=3) # check that the stop band is close to zero idx = np.logical_and(f > 0.5-a, f < 0.5) assert_array_almost_equal(Hmag[idx], 0, decimal=3) def test_compare(self): # compare to OCTAVE output taps = firls(9, [0, 0.5, 0.55, 1], [1, 1, 0, 0], [1, 2]) # >> taps = firls(8, [0 0.5 0.55 1], [1 1 0 0], [1, 2]); known_taps = [-6.26930101730182e-04, -1.03354450635036e-01, -9.81576747564301e-03, 3.17271686090449e-01, 5.11409425599933e-01, 3.17271686090449e-01, -9.81576747564301e-03, -1.03354450635036e-01, -6.26930101730182e-04] assert_allclose(taps, known_taps) # compare to MATLAB output taps = firls(11, [0, 0.5, 0.5, 1], [1, 1, 0, 0], [1, 2]) # >> taps = firls(10, [0 0.5 0.5 1], [1 1 0 0], [1, 2]); known_taps = [ 0.058545300496815, -0.014233383714318, -0.104688258464392, 0.012403323025279, 0.317930861136062, 0.488047220029700, 0.317930861136062, 0.012403323025279, -0.104688258464392, -0.014233383714318, 0.058545300496815] assert_allclose(taps, known_taps) # With linear changes: taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], fs=20) # >> taps = firls(6, [0, 0.1, 0.2, 0.3, 0.4, 0.5], [1, 0, 0, 1, 1, 0]) known_taps = [ 1.156090832768218, -4.1385894727395849, 7.5288619164321826, -8.5530572592947856, 7.5288619164321826, -4.1385894727395849, 1.156090832768218] assert_allclose(taps, known_taps) with np.testing.suppress_warnings() as sup: sup.filter(DeprecationWarning, "Keyword argument 'nyq'") taps = firls(7, (0, 1, 2, 3, 4, 5), [1, 0, 0, 1, 1, 0], nyq=10) assert_allclose(taps, known_taps) with pytest.raises(ValueError, match='between 0 and 1'): firls(7, [0, 1], [0, 1], nyq=0.5) def test_rank_deficient(self): # solve() runs but warns (only sometimes, so here we don't use match) x = firls(21, [0, 0.1, 0.9, 1], [1, 1, 0, 0]) w, h = freqz(x, fs=2.) 
assert_allclose(np.abs(h[:2]), 1., atol=1e-5) assert_allclose(np.abs(h[-2:]), 0., atol=1e-6) # switch to pinvh (tolerances could be higher with longer # filters, but using shorter ones is faster computationally and # the idea is the same) x = firls(101, [0, 0.01, 0.99, 1], [1, 1, 0, 0]) w, h = freqz(x, fs=2.) mask = w < 0.01 assert mask.sum() > 3 assert_allclose(np.abs(h[mask]), 1., atol=1e-4) mask = w > 0.99 assert mask.sum() > 3 assert_allclose(np.abs(h[mask]), 0., atol=1e-4) def test_nyq_deprecation(self): with pytest.warns(DeprecationWarning, match="Keyword argument 'nyq' is deprecated in " ): firls(1, (0, 1), (0, 0), nyq=10) class TestMinimumPhase: def test_bad_args(self): # not enough taps assert_raises(ValueError, minimum_phase, [1.]) assert_raises(ValueError, minimum_phase, [1., 1.]) assert_raises(ValueError, minimum_phase, np.full(10, 1j)) assert_raises(ValueError, minimum_phase, 'foo') assert_raises(ValueError, minimum_phase, np.ones(10), n_fft=8) assert_raises(ValueError, minimum_phase, np.ones(10), method='foo') assert_warns(RuntimeWarning, minimum_phase, np.arange(3)) def test_homomorphic(self): # check that it can recover frequency responses of arbitrary # linear-phase filters # for some cases we can get the actual filter back h = [1, -1] h_new = minimum_phase(np.convolve(h, h[::-1])) assert_allclose(h_new, h, rtol=0.05) # but in general we only guarantee we get the magnitude back rng = np.random.RandomState(0) for n in (2, 3, 10, 11, 15, 16, 17, 20, 21, 100, 101): h = rng.randn(n) h_new = minimum_phase(np.convolve(h, h[::-1])) assert_allclose(np.abs(fft(h_new)), np.abs(fft(h)), rtol=1e-4) def test_hilbert(self): # compare to MATLAB output of reference implementation # f=[0 0.3 0.5 1]; # a=[1 1 0 0]; # h=remez(11,f,a); h = remez(12, [0, 0.3, 0.5, 1], [1, 0], fs=2.) k = [0.349585548646686, 0.373552164395447, 0.326082685363438, 0.077152207480935, -0.129943946349364, -0.059355880509749] m = minimum_phase(h, 'hilbert') assert_allclose(m, k, rtol=5e-3) # f=[0 0.8 0.9 1]; # a=[0 0 1 1]; # h=remez(20,f,a); h = remez(21, [0, 0.8, 0.9, 1], [0, 1], fs=2.) k = [0.232486803906329, -0.133551833687071, 0.151871456867244, -0.157957283165866, 0.151739294892963, -0.129293146705090, 0.100787844523204, -0.065832656741252, 0.035361328741024, -0.014977068692269, -0.158416139047557] m = minimum_phase(h, 'hilbert', n_fft=2**19) assert_allclose(m, k, rtol=2e-3)
file_length: 28,932
avg_line_length: 41.863704
max_line_length: 90
extension_type: py
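Several of the FIR tests above (for example TestFirWinMore) chain kaiserord and firwin to design a Kaiser-window filter for a requested attenuation and transition width. The following is a small end-to-end sketch of that workflow; the 65 dB, 0.08, and 0.3 figures are illustrative choices, not values taken from the dumped tests.

import numpy as np
from scipy.signal import kaiserord, firwin, freqz

ripple_db = 65.0   # desired stopband attenuation in dB
width = 0.08       # transition width, as a fraction of the Nyquist rate
cutoff = 0.3       # cutoff frequency, as a fraction of the Nyquist rate

numtaps, beta = kaiserord(ripple_db, width)
taps = firwin(numtaps, cutoff, window=('kaiser', beta))

# Check the realized stopband attenuation, allowing a small margin.
w, h = freqz(taps, worN=8000)
f = w / np.pi
stopband = np.abs(h[f >= cutoff + width / 2])
assert 20 * np.log10(stopband.max()) < -(ripple_db - 5)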
repo: scipy
file: scipy-main/scipy/signal/tests/test_max_len_seq.py
import numpy as np from numpy.testing import assert_allclose, assert_array_equal from pytest import raises as assert_raises from numpy.fft import fft, ifft from scipy.signal import max_len_seq class TestMLS: def test_mls_inputs(self): # can't all be zero state assert_raises(ValueError, max_len_seq, 10, state=np.zeros(10)) # wrong size state assert_raises(ValueError, max_len_seq, 10, state=np.ones(3)) # wrong length assert_raises(ValueError, max_len_seq, 10, length=-1) assert_array_equal(max_len_seq(10, length=0)[0], []) # unknown taps assert_raises(ValueError, max_len_seq, 64) # bad taps assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1]) def test_mls_output(self): # define some alternate working taps alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4], 8: [7, 5, 3]} # assume the other bit levels work, too slow to test higher orders... for nbits in range(2, 8): for state in [None, np.round(np.random.rand(nbits))]: for taps in [None, alt_taps[nbits]]: if state is not None and np.all(state == 0): state[0] = 1 # they can't all be zero orig_m = max_len_seq(nbits, state=state, taps=taps)[0] m = 2. * orig_m - 1. # convert to +/- 1 representation # First, make sure we got all 1's or -1 err_msg = "mls had non binary terms" assert_array_equal(np.abs(m), np.ones_like(m), err_msg=err_msg) # Test via circular cross-correlation, which is just mult. # in the frequency domain with one signal conjugated tester = np.real(ifft(fft(m) * np.conj(fft(m)))) out_len = 2**nbits - 1 # impulse amplitude == test_len err_msg = "mls impulse has incorrect value" assert_allclose(tester[0], out_len, err_msg=err_msg) # steady-state is -1 err_msg = "mls steady-state has incorrect value" assert_allclose(tester[1:], np.full(out_len - 1, -1), err_msg=err_msg) # let's do the split thing using a couple options for n in (1, 2**(nbits - 1)): m1, s1 = max_len_seq(nbits, state=state, taps=taps, length=n) m2, s2 = max_len_seq(nbits, state=s1, taps=taps, length=1) m3, s3 = max_len_seq(nbits, state=s2, taps=taps, length=out_len - n - 1) new_m = np.concatenate((m1, m2, m3)) assert_array_equal(orig_m, new_m)
file_length: 3,106
avg_line_length: 46.075758
max_line_length: 79
extension_type: py
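TestMLS.test_mls_output above verifies the defining correlation property of a maximal length sequence: in +/-1 form its circular autocorrelation equals 2**nbits - 1 at lag 0 and -1 at every other lag. Below is a compact standalone sketch of that same check (nbits = 8 is an arbitrary choice supported by scipy's built-in tap table).

import numpy as np
from scipy.signal import max_len_seq

nbits = 8
seq = 2.0 * max_len_seq(nbits)[0] - 1.0     # map {0, 1} -> {-1, +1}
n = seq.size                                # 2**nbits - 1 = 255

# Circular autocorrelation via the FFT (multiply by the conjugate spectrum).
spectrum = np.fft.fft(seq)
acorr = np.real(np.fft.ifft(spectrum * np.conj(spectrum)))

assert np.isclose(acorr[0], n)              # impulse of height 2**nbits - 1 at lag 0
assert np.allclose(acorr[1:], -1.0)         # flat value of -1 at all other lags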
repo: scipy
file: scipy-main/scipy/signal/tests/test_filter_design.py
import warnings from scipy._lib import _pep440 import numpy as np from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_array_less, assert_equal, assert_, assert_allclose, assert_warns, suppress_warnings) import pytest from pytest import raises as assert_raises from numpy import array, spacing, sin, pi, sort, sqrt from scipy.signal import (argrelextrema, BadCoefficients, bessel, besselap, bilinear, buttap, butter, buttord, cheb1ap, cheb1ord, cheb2ap, cheb2ord, cheby1, cheby2, ellip, ellipap, ellipord, firwin, freqs_zpk, freqs, freqz, freqz_zpk, gammatone, group_delay, iircomb, iirdesign, iirfilter, iirnotch, iirpeak, lp2bp, lp2bs, lp2hp, lp2lp, normalize, medfilt, order_filter, sos2tf, sos2zpk, sosfreqz, tf2sos, tf2zpk, zpk2sos, zpk2tf, bilinear_zpk, lp2lp_zpk, lp2hp_zpk, lp2bp_zpk, lp2bs_zpk) from scipy.signal._filter_design import (_cplxreal, _cplxpair, _norm_factor, _bessel_poly, _bessel_zeros) try: import mpmath except ImportError: mpmath = None def mpmath_check(min_ver): return pytest.mark.skipif(mpmath is None or _pep440.parse(mpmath.__version__) < _pep440.Version(min_ver), reason="mpmath version >= %s required" % min_ver) class TestCplxPair: def test_trivial_input(self): assert_equal(_cplxpair([]).size, 0) assert_equal(_cplxpair(1), 1) def test_output_order(self): assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j]) a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2] b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2] assert_allclose(_cplxpair(a), b) # points spaced around the unit circle z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7) z1 = np.copy(z) np.random.shuffle(z) assert_allclose(_cplxpair(z), z1) np.random.shuffle(z) assert_allclose(_cplxpair(z), z1) np.random.shuffle(z) assert_allclose(_cplxpair(z), z1) # Should be able to pair up all the conjugates x = np.random.rand(10000) + 1j * np.random.rand(10000) y = x.conj() z = np.random.rand(10000) x = np.concatenate((x, y, z)) np.random.shuffle(x) c = _cplxpair(x) # Every other element of head should be conjugates: assert_allclose(c[0:20000:2], np.conj(c[1:20000:2])) # Real parts of head should be in sorted order: assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real)) # Tail should be sorted real numbers: assert_allclose(c[20000:], np.sort(c[20000:])) def test_real_integer_input(self): assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2]) def test_tolerances(self): eps = spacing(1) assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps), [-1j, 1j, 1+1j*eps]) # sorting close to 0 assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j]) assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j]) assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j]) def test_unmatched_conjugates(self): # 1+2j is unmatched assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j]) # 1+2j and 1-3j are unmatched assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j]) # 1+3j is unmatched assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j]) # Not conjugates assert_raises(ValueError, _cplxpair, [4+5j, 4+5j]) assert_raises(ValueError, _cplxpair, [1-7j, 1-7j]) # No pairs assert_raises(ValueError, _cplxpair, [1+3j]) assert_raises(ValueError, _cplxpair, [1-3j]) class TestCplxReal: def test_trivial_input(self): assert_equal(_cplxreal([]), ([], [])) assert_equal(_cplxreal(1), ([], [1])) def test_output_order(self): zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1]))) assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1]) eps = spacing(1) a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j, 1, 4, 2, 3, 0, 0, 2+3j, 2-3j, 1-eps + 1j, 1+2j, 
1-2j, 1+eps - 1j, # sorts out of order 3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j, 2-3j, 2+3j] zc, zr = _cplxreal(a) assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j, 3+1j]) assert_allclose(zr, [0, 0, 1, 2, 3, 4]) z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j, 0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j, 4+eps-2j, 4-1j, 4-eps+2j]) zc, zr = _cplxreal(z) assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j, 4+2j]) assert_equal(zr, []) def test_unmatched_conjugates(self): # 1+2j is unmatched assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j]) # 1+2j and 1-3j are unmatched assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j]) # 1+3j is unmatched assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j]) # No pairs assert_raises(ValueError, _cplxreal, [1+3j]) assert_raises(ValueError, _cplxreal, [1-3j]) def test_real_integer_input(self): zc, zr = _cplxreal([2, 0, 1, 4]) assert_array_equal(zc, []) assert_array_equal(zr, [0, 1, 2, 4]) class TestTf2zpk: @pytest.mark.parametrize('dt', (np.float64, np.complex128)) def test_simple(self, dt): z_r = np.array([0.5, -0.5]) p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)]) # Sort the zeros/poles so that we don't fail the test if the order # changes z_r.sort() p_r.sort() b = np.poly(z_r).astype(dt) a = np.poly(p_r).astype(dt) z, p, k = tf2zpk(b, a) z.sort() # The real part of `p` is ~0.0, so sort by imaginary part p = p[np.argsort(p.imag)] assert_array_almost_equal(z, z_r) assert_array_almost_equal(p, p_r) assert_array_almost_equal(k, 1.) assert k.dtype == dt def test_bad_filter(self): # Regression test for #651: better handling of badly conditioned # filter coefficients. with suppress_warnings(): warnings.simplefilter("error", BadCoefficients) assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0]) class TestZpk2Tf: def test_identity(self): """Test the identity transfer function.""" z = [] p = [] k = 1. b, a = zpk2tf(z, p, k) b_r = np.array([1.]) # desired result a_r = np.array([1.]) # desired result # The test for the *type* of the return values is a regression # test for ticket #1095. In the case p=[], zpk2tf used to # return the scalar 1.0 instead of array([1.0]). 
assert_array_equal(b, b_r) assert_(isinstance(b, np.ndarray)) assert_array_equal(a, a_r) assert_(isinstance(a, np.ndarray)) class TestSos2Zpk: def test_basic(self): sos = [[1, 0, 1, 1, 0, -0.81], [1, 0, 0, 1, 0, +0.49]] z, p, k = sos2zpk(sos) z2 = [1j, -1j, 0, 0] p2 = [0.9, -0.9, 0.7j, -0.7j] k2 = 1 assert_array_almost_equal(sort(z), sort(z2), decimal=4) assert_array_almost_equal(sort(p), sort(p2), decimal=4) assert_array_almost_equal(k, k2) sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873], [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873], [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]] z, p, k = sos2zpk(sos) z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, 0.8090 - 0.5878j, -1.0000 + 0.0000j, 0] p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, 0.7922 - 0.5755j, -0.9791 + 0.0000j, 0] k2 = 1 assert_array_almost_equal(sort(z), sort(z2), decimal=4) assert_array_almost_equal(sort(p), sort(p2), decimal=4) sos = array([[1, 2, 3, 1, 0.2, 0.3], [4, 5, 6, 1, 0.4, 0.5]]) z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j, -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j]) p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j, -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j]) k = 4 z2, p2, k2 = sos2zpk(sos) assert_allclose(_cplxpair(z2), z) assert_allclose(_cplxpair(p2), p) assert_allclose(k2, k) def test_fewer_zeros(self): """Test not the expected number of p/z (effectively at origin).""" sos = butter(3, 0.1, output='sos') z, p, k = sos2zpk(sos) assert len(z) == 4 assert len(p) == 4 sos = butter(12, [5., 30.], 'bandpass', fs=1200., analog=False, output='sos') with pytest.warns(BadCoefficients, match='Badly conditioned'): z, p, k = sos2zpk(sos) assert len(z) == 24 assert len(p) == 24 class TestSos2Tf: def test_basic(self): sos = [[1, 1, 1, 1, 0, -1], [-2, 3, 1, 1, 10, 1]] b, a = sos2tf(sos) assert_array_almost_equal(b, [-2, 1, 2, 4, 1]) assert_array_almost_equal(a, [1, 10, 0, -10, -1]) class TestTf2Sos: def test_basic(self): num = [2, 16, 44, 56, 32] den = [3, 3, -15, 18, -12] sos = tf2sos(num, den) sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000], [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]] assert_array_almost_equal(sos, sos2, decimal=4) b = [1, -3, 11, -27, 18] a = [16, 12, 2, -4, -1] sos = tf2sos(b, a) sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250], [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]] # assert_array_almost_equal(sos, sos2, decimal=4) @pytest.mark.parametrize('b, a, analog, sos', [([1], [1], False, [[1., 0., 0., 1., 0., 0.]]), ([1], [1], True, [[0., 0., 1., 0., 0., 1.]]), ([1], [1., 0., -1.01, 0, 0.01], False, [[1., 0., 0., 1., 0., -0.01], [1., 0., 0., 1., 0., -1]]), ([1], [1., 0., -1.01, 0, 0.01], True, [[0., 0., 1., 1., 0., -1], [0., 0., 1., 1., 0., -0.01]])]) def test_analog(self, b, a, analog, sos): sos2 = tf2sos(b, a, analog=analog) assert_array_almost_equal(sos, sos2, decimal=4) class TestZpk2Sos: @pytest.mark.parametrize('dt', 'fdgFDG') @pytest.mark.parametrize('pairing, analog', [('nearest', False), ('keep_odd', False), ('minimal', False), ('minimal', True)]) def test_dtypes(self, dt, pairing, analog): z = np.array([-1, -1]).astype(dt) ct = dt.upper() # the poles have to be complex p = np.array([0.57149 + 0.29360j, 0.57149 - 0.29360j]).astype(ct) k = np.array(1).astype(dt) sos = zpk2sos(z, p, k, pairing=pairing, analog=analog) sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB assert_array_almost_equal(sos, sos2, decimal=4) def test_basic(self): for 
pairing in ('nearest', 'keep_odd'): # # Cases that match octave # z = [-1, -1] p = [0.57149 + 0.29360j, 0.57149 - 0.29360j] k = 1 sos = zpk2sos(z, p, k, pairing=pairing) sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]] # octave & MATLAB assert_array_almost_equal(sos, sos2, decimal=4) z = [1j, -1j] p = [0.9, -0.9, 0.7j, -0.7j] k = 1 sos = zpk2sos(z, p, k, pairing=pairing) sos2 = [[1, 0, 1, 1, 0, +0.49], [1, 0, 0, 1, 0, -0.81]] # octave # sos2 = [[0, 0, 1, 1, -0.9, 0], # [1, 0, 1, 1, 0.9, 0]] # MATLAB assert_array_almost_equal(sos, sos2, decimal=4) z = [] p = [0.8, -0.5+0.25j, -0.5-0.25j] k = 1. sos = zpk2sos(z, p, k, pairing=pairing) sos2 = [[1., 0., 0., 1., 1., 0.3125], [1., 0., 0., 1., -0.8, 0.]] # octave, MATLAB fails assert_array_almost_equal(sos, sos2, decimal=4) z = [1., 1., 0.9j, -0.9j] p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j] k = 1 sos = zpk2sos(z, p, k, pairing=pairing) sos2 = [[1, 0, 0.81, 1, -0.2, 0.82], [1, -2, 1, 1, -1.98, 0.9802]] # octave # sos2 = [[1, -2, 1, 1, -0.2, 0.82], # [1, 0, 0.81, 1, -1.98, 0.9802]] # MATLAB assert_array_almost_equal(sos, sos2, decimal=4) z = [0.9+0.1j, 0.9-0.1j, -0.9] p = [0.75+0.25j, 0.75-0.25j, 0.9] k = 1 sos = zpk2sos(z, p, k, pairing=pairing) if pairing == 'keep_odd': sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625], [1, 0.9, 0, 1, -0.9, 0]] # octave; MATLAB fails assert_array_almost_equal(sos, sos2, decimal=4) else: # pairing == 'nearest' sos2 = [[1, 0.9, 0, 1, -1.5, 0.625], [1, -1.8, 0.82, 1, -0.9, 0]] # our algorithm assert_array_almost_equal(sos, sos2, decimal=4) # # Cases that differ from octave: # z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j, +0.8090 - 0.5878j, -1.0000 + 0.0000j] p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j, +0.7922 - 0.5755j, -0.9791 + 0.0000j] k = 1 sos = zpk2sos(z, p, k, pairing=pairing) # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870], # [1, -1.618, 1, 1, -1.5844, 0.95878], # [1, 1, 0, 1, 0.9791, 0]] # octave, MATLAB fails sos2 = [[1, 1, 0, 1, +0.97915, 0], [1, 0.61803, 1, 1, +0.60515, 0.95873], [1, -1.61803, 1, 1, -1.58430, 0.95873]] assert_array_almost_equal(sos, sos2, decimal=4) z = [-1 - 1.4142j, -1 + 1.4142j, -0.625 - 1.0533j, -0.625 + 1.0533j] p = [-0.2 - 0.6782j, -0.2 + 0.6782j, -0.1 - 0.5385j, -0.1 + 0.5385j] k = 4 sos = zpk2sos(z, p, k, pairing=pairing) sos2 = [[4, 8, 12, 1, 0.2, 0.3], [1, 1.25, 1.5, 1, 0.4, 0.5]] # MATLAB # sos2 = [[4, 8, 12, 1, 0.4, 0.5], # [1, 1.25, 1.5, 1, 0.2, 0.3]] # octave assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4) z = [] p = [0.2, -0.5+0.25j, -0.5-0.25j] k = 1. sos = zpk2sos(z, p, k, pairing=pairing) sos2 = [[1., 0., 0., 1., -0.2, 0.], [1., 0., 0., 1., 1., 0.3125]] # sos2 = [[1., 0., 0., 1., 1., 0.3125], # [1., 0., 0., 1., -0.2, 0]] # octave, MATLAB fails assert_array_almost_equal(sos, sos2, decimal=4) # The next two examples are adapted from Leland B. Jackson, # "Digital Filters and Signal Processing (1995) p.400: # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false deg2rad = np.pi / 180. k = 1. 
# first example thetas = [22.5, 45, 77.5] mags = [0.8, 0.6, 0.9] z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas]) z = np.concatenate((z, np.conj(z))) p = np.array([mag * np.exp(theta * deg2rad * 1j) for theta, mag in zip(thetas, mags)]) p = np.concatenate((p, np.conj(p))) sos = zpk2sos(z, p, k) # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81], # octave, # [1, -1.41421, 1, 1, -0.84853, 0.36], # MATLAB fails # [1, -1.84776, 1, 1, -1.47821, 0.64]] # Note that pole-zero pairing matches, but ordering is different sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36], [1, -1.84776, 1, 1, -1.47821, 0.64], [1, -0.43288, 1, 1, -0.38959, 0.81]] assert_array_almost_equal(sos, sos2, decimal=4) # second example z = np.array([np.exp(theta * deg2rad * 1j) for theta in (85., 10.)]) z = np.concatenate((z, np.conj(z), [1, -1])) sos = zpk2sos(z, p, k) # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81], # octave "wrong", # [1, -1.96962, 1, 1, -0.84853, 0.36], # MATLAB fails # [1, 0, -1, 1, -1.47821, 0.64000]] # Our pole-zero pairing matches the text, Octave does not sos2 = [[1, 0, -1, 1, -0.84853, 0.36], [1, -1.96962, 1, 1, -1.47821, 0.64], [1, -0.17431, 1, 1, -0.38959, 0.81]] assert_array_almost_equal(sos, sos2, decimal=4) # these examples are taken from the doc string, and show the # effect of the 'pairing' argument @pytest.mark.parametrize('pairing, sos', [('nearest', np.array([[1., 1., 0.5, 1., -0.75, 0.], [1., 1., 0., 1., -1.6, 0.65]])), ('keep_odd', np.array([[1., 1., 0, 1., -0.75, 0.], [1., 1., 0.5, 1., -1.6, 0.65]])), ('minimal', np.array([[0., 1., 1., 0., 1., -0.75], [1., 1., 0.5, 1., -1.6, 0.65]]))]) def test_pairing(self, pairing, sos): z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) sos2 = zpk2sos(z1, p1, 1, pairing=pairing) assert_array_almost_equal(sos, sos2, decimal=4) @pytest.mark.parametrize('p, sos_dt', [([-1, 1, -0.1, 0.1], [[0., 0., 1., 1., 0., -0.01], [0., 0., 1., 1., 0., -1]]), ([-0.7071+0.7071j, -0.7071-0.7071j, -0.1j, 0.1j], [[0., 0., 1., 1., 0., 0.01], [0., 0., 1., 1., 1.4142, 1.]])]) def test_analog(self, p, sos_dt): # test `analog` argument # for discrete time, poles closest to unit circle should appear last # for cont. time, poles closest to imaginary axis should appear last sos2_dt = zpk2sos([], p, 1, pairing='minimal', analog=False) sos2_ct = zpk2sos([], p, 1, pairing='minimal', analog=True) assert_array_almost_equal(sos_dt, sos2_dt, decimal=4) assert_array_almost_equal(sos_dt[::-1], sos2_ct, decimal=4) def test_bad_args(self): with pytest.raises(ValueError, match=r'pairing must be one of'): zpk2sos([1], [2], 1, pairing='no_such_pairing') with pytest.raises(ValueError, match=r'.*pairing must be "minimal"'): zpk2sos([1], [2], 1, pairing='keep_odd', analog=True) with pytest.raises(ValueError, match=r'.*must have len\(p\)>=len\(z\)'): zpk2sos([1, 1], [2], 1, analog=True) with pytest.raises(ValueError, match=r'k must be real'): zpk2sos([1], [2], k=1j) class TestFreqs: def test_basic(self): _, h = freqs([1.0], [1.0], worN=8) assert_array_almost_equal(h, np.ones(8)) def test_output(self): # 1st order low-pass filter: H(s) = 1 / (s + 1) w = [0.1, 1, 10, 100] num = [1] den = [1, 1] w, H = freqs(num, den, worN=w) s = w * 1j expected = 1 / (s + 1) assert_array_almost_equal(H.real, expected.real) assert_array_almost_equal(H.imag, expected.imag) def test_freq_range(self): # Test that freqresp() finds a reasonable frequency range. # 1st order low-pass filter: H(s) = 1 / (s + 1) # Expected range is from 0.01 to 10. 
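# The default grid brackets the corner frequency at w = 1 rad/s, where this
# filter is 3 dB down.  A worked spot check (a sketch, reusing the
# module-level freqs/np/assert_allclose imports):
_, h_corner = freqs([1], [1, 1], worN=[1.0])
assert_allclose(np.abs(h_corner), [1 / np.sqrt(2)])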
num = [1] den = [1, 1] n = 10 expected_w = np.logspace(-2, 1, n) w, H = freqs(num, den, worN=n) assert_array_almost_equal(w, expected_w) def test_plot(self): def plot(w, h): assert_array_almost_equal(h, np.ones(8)) assert_raises(ZeroDivisionError, freqs, [1.0], [1.0], worN=8, plot=lambda w, h: 1 / 0) freqs([1.0], [1.0], worN=8, plot=plot) def test_backward_compat(self): # For backward compatibility, test if None act as a wrapper for default w1, h1 = freqs([1.0], [1.0]) w2, h2 = freqs([1.0], [1.0], None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) def test_w_or_N_types(self): # Measure at 8 equally-spaced points for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = freqs([1.0], [1.0], worN=N) assert_equal(len(w), 8) assert_array_almost_equal(h, np.ones(8)) # Measure at frequency 8 rad/sec for w in (8.0, 8.0+0j): w_out, h = freqs([1.0], [1.0], worN=w) assert_array_almost_equal(w_out, [8]) assert_array_almost_equal(h, [1]) class TestFreqs_zpk: def test_basic(self): _, h = freqs_zpk([1.0], [1.0], [1.0], worN=8) assert_array_almost_equal(h, np.ones(8)) def test_output(self): # 1st order low-pass filter: H(s) = 1 / (s + 1) w = [0.1, 1, 10, 100] z = [] p = [-1] k = 1 w, H = freqs_zpk(z, p, k, worN=w) s = w * 1j expected = 1 / (s + 1) assert_array_almost_equal(H.real, expected.real) assert_array_almost_equal(H.imag, expected.imag) def test_freq_range(self): # Test that freqresp() finds a reasonable frequency range. # 1st order low-pass filter: H(s) = 1 / (s + 1) # Expected range is from 0.01 to 10. z = [] p = [-1] k = 1 n = 10 expected_w = np.logspace(-2, 1, n) w, H = freqs_zpk(z, p, k, worN=n) assert_array_almost_equal(w, expected_w) def test_vs_freqs(self): b, a = cheby1(4, 5, 100, analog=True, output='ba') z, p, k = cheby1(4, 5, 100, analog=True, output='zpk') w1, h1 = freqs(b, a) w2, h2 = freqs_zpk(z, p, k) assert_allclose(w1, w2) assert_allclose(h1, h2, rtol=1e-6) def test_backward_compat(self): # For backward compatibility, test if None act as a wrapper for default w1, h1 = freqs_zpk([1.0], [1.0], [1.0]) w2, h2 = freqs_zpk([1.0], [1.0], [1.0], None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) def test_w_or_N_types(self): # Measure at 8 equally-spaced points for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = freqs_zpk([], [], 1, worN=N) assert_equal(len(w), 8) assert_array_almost_equal(h, np.ones(8)) # Measure at frequency 8 rad/sec for w in (8.0, 8.0+0j): w_out, h = freqs_zpk([], [], 1, worN=w) assert_array_almost_equal(w_out, [8]) assert_array_almost_equal(h, [1]) class TestFreqz: def test_ticket1441(self): """Regression test for ticket 1441.""" # Because freqz previously used arange instead of linspace, # when N was large, it would return one more point than # requested. N = 100000 w, h = freqz([1.0], worN=N) assert_equal(w.shape, (N,)) def test_basic(self): w, h = freqz([1.0], worN=8) assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) assert_array_almost_equal(h, np.ones(8)) w, h = freqz([1.0], worN=9) assert_array_almost_equal(w, np.pi * np.arange(9) / 9.) 
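# With fs specified, the same half-spectrum grid is simply rescaled by
# fs/(2*pi); e.g. for fs=9 the nine points fall at 0, 0.5, ..., 4
# (a sketch reusing the module-level freqz/np imports):
w_fs, _ = freqz([1.0], worN=9, fs=9)
assert_array_almost_equal(w_fs, 0.5 * np.arange(9))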
assert_array_almost_equal(h, np.ones(9)) for a in [1, np.ones(2)]: w, h = freqz(np.ones(2), a, worN=0) assert_equal(w.shape, (0,)) assert_equal(h.shape, (0,)) assert_equal(h.dtype, np.dtype('complex128')) t = np.linspace(0, 1, 4, endpoint=False) for b, a, h_whole in zip( ([1., 0, 0, 0], np.sin(2 * np.pi * t)), ([1., 0, 0, 0], [0.5, 0, 0, 0]), ([1., 1., 1., 1.], [0, -4j, 0, 4j])): w, h = freqz(b, a, worN=4, whole=True) expected_w = np.linspace(0, 2 * np.pi, 4, endpoint=False) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) # simultaneously check int-like support w, h = freqz(b, a, worN=np.int32(4), whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) w, h = freqz(b, a, worN=w, whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, h_whole) def test_basic_whole(self): w, h = freqz([1.0], worN=8, whole=True) assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) assert_array_almost_equal(h, np.ones(8)) def test_plot(self): def plot(w, h): assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) assert_array_almost_equal(h, np.ones(8)) assert_raises(ZeroDivisionError, freqz, [1.0], worN=8, plot=lambda w, h: 1 / 0) freqz([1.0], worN=8, plot=plot) def test_fft_wrapping(self): # Some simple real FIR filters bs = list() # filters as_ = list() hs_whole = list() hs_half = list() # 3 taps t = np.linspace(0, 1, 3, endpoint=False) bs.append(np.sin(2 * np.pi * t)) as_.append(3.) hs_whole.append([0, -0.5j, 0.5j]) hs_half.append([0, np.sqrt(1./12.), -0.5j]) # 4 taps t = np.linspace(0, 1, 4, endpoint=False) bs.append(np.sin(2 * np.pi * t)) as_.append(0.5) hs_whole.append([0, -4j, 0, 4j]) hs_half.append([0, np.sqrt(8), -4j, -np.sqrt(8)]) del t for ii, b in enumerate(bs): # whole a = as_[ii] expected_w = np.linspace(0, 2 * np.pi, len(b), endpoint=False) w, h = freqz(b, a, worN=expected_w, whole=True) # polyval err_msg = f'b = {b}, a={a}' assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) w, h = freqz(b, a, worN=len(b), whole=True) # FFT assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_whole[ii], err_msg=err_msg) # non-whole expected_w = np.linspace(0, np.pi, len(b), endpoint=False) w, h = freqz(b, a, worN=expected_w, whole=False) # polyval assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) w, h = freqz(b, a, worN=len(b), whole=False) # FFT assert_array_almost_equal(w, expected_w, err_msg=err_msg) assert_array_almost_equal(h, hs_half[ii], err_msg=err_msg) # some random FIR filters (real + complex) # assume polyval is accurate rng = np.random.RandomState(0) for ii in range(2, 10): # number of taps b = rng.randn(ii) for kk in range(2): a = rng.randn(1) if kk == 0 else rng.randn(3) for jj in range(2): if jj == 1: b = b + rng.randn(ii) * 1j # whole expected_w = np.linspace(0, 2 * np.pi, ii, endpoint=False) w, expected_h = freqz(b, a, worN=expected_w, whole=True) assert_array_almost_equal(w, expected_w) w, h = freqz(b, a, worN=ii, whole=True) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, expected_h) # half expected_w = np.linspace(0, np.pi, ii, endpoint=False) w, expected_h = freqz(b, a, worN=expected_w, whole=False) assert_array_almost_equal(w, expected_w) w, h = freqz(b, a, worN=ii, whole=False) assert_array_almost_equal(w, expected_w) assert_array_almost_equal(h, expected_h) def test_broadcasting1(self): # Test 
broadcasting with worN an integer or a 1-D array, # b and a are n-dimensional arrays. np.random.seed(123) b = np.random.rand(3, 5, 1) a = np.random.rand(2, 1) for whole in [False, True]: # Test with worN being integers (one fast for FFT and one not), # a 1-D array, and an empty array. for worN in [16, 17, np.linspace(0, 1, 10), np.array([])]: w, h = freqz(b, a, worN=worN, whole=whole) for k in range(b.shape[1]): bk = b[:, k, 0] ak = a[:, 0] ww, hh = freqz(bk, ak, worN=worN, whole=whole) assert_allclose(ww, w) assert_allclose(hh, h[k]) def test_broadcasting2(self): # Test broadcasting with worN an integer or a 1-D array, # b is an n-dimensional array, and a is left at the default value. np.random.seed(123) b = np.random.rand(3, 5, 1) for whole in [False, True]: for worN in [16, 17, np.linspace(0, 1, 10)]: w, h = freqz(b, worN=worN, whole=whole) for k in range(b.shape[1]): bk = b[:, k, 0] ww, hh = freqz(bk, worN=worN, whole=whole) assert_allclose(ww, w) assert_allclose(hh, h[k]) def test_broadcasting3(self): # Test broadcasting where b.shape[-1] is the same length # as worN, and a is left at the default value. np.random.seed(123) N = 16 b = np.random.rand(3, N) for whole in [False, True]: for worN in [N, np.linspace(0, 1, N)]: w, h = freqz(b, worN=worN, whole=whole) assert_equal(w.size, N) for k in range(N): bk = b[:, k] ww, hh = freqz(bk, worN=w[k], whole=whole) assert_allclose(ww, w[k]) assert_allclose(hh, h[k]) def test_broadcasting4(self): # Test broadcasting with worN a 2-D array. np.random.seed(123) b = np.random.rand(4, 2, 1, 1) a = np.random.rand(5, 2, 1, 1) for whole in [False, True]: for worN in [np.random.rand(6, 7), np.empty((6, 0))]: w, h = freqz(b, a, worN=worN, whole=whole) assert_allclose(w, worN, rtol=1e-14) assert_equal(h.shape, (2,) + worN.shape) for k in range(2): ww, hh = freqz(b[:, k, 0, 0], a[:, k, 0, 0], worN=worN.ravel(), whole=whole) assert_allclose(ww, worN.ravel(), rtol=1e-14) assert_allclose(hh, h[k, :, :].ravel()) def test_backward_compat(self): # For backward compatibility, test if None act as a wrapper for default w1, h1 = freqz([1.0], 1) w2, h2 = freqz([1.0], 1, None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) def test_fs_param(self): fs = 900 b = [0.039479155677484369, 0.11843746703245311, 0.11843746703245311, 0.039479155677484369] a = [1.0, -1.3199152021838287, 0.80341991081938424, -0.16767146321568049] # N = None, whole=False w1, h1 = freqz(b, a, fs=fs) w2, h2 = freqz(b, a) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False)) # N = None, whole=True w1, h1 = freqz(b, a, whole=True, fs=fs) w2, h2 = freqz(b, a, whole=True) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False)) # N = 5, whole=False w1, h1 = freqz(b, a, 5, fs=fs) w2, h2 = freqz(b, a, 5) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False)) # N = 5, whole=True w1, h1 = freqz(b, a, 5, whole=True, fs=fs) w2, h2 = freqz(b, a, 5, whole=True) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False)) # w is an array_like for w in ([123], (123,), np.array([123]), (50, 123, 230), np.array([50, 123, 230])): w1, h1 = freqz(b, a, w, fs=fs) w2, h2 = freqz(b, a, 2*pi*np.array(w)/fs) assert_allclose(h1, h2) assert_allclose(w, w1) def test_w_or_N_types(self): # Measure at 7 (polyval) or 8 (fft) equally-spaced points for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), np.array(7), 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = 
freqz([1.0], worN=N) assert_array_almost_equal(w, np.pi * np.arange(N) / N) assert_array_almost_equal(h, np.ones(N)) w, h = freqz([1.0], worN=N, fs=100) assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) assert_array_almost_equal(h, np.ones(N)) # Measure at frequency 8 Hz for w in (8.0, 8.0+0j): # Only makes sense when fs is specified w_out, h = freqz([1.0], worN=w, fs=100) assert_array_almost_equal(w_out, [8]) assert_array_almost_equal(h, [1]) def test_nyquist(self): w, h = freqz([1.0], worN=8, include_nyquist=True) assert_array_almost_equal(w, np.pi * np.arange(8) / 7.) assert_array_almost_equal(h, np.ones(8)) w, h = freqz([1.0], worN=9, include_nyquist=True) assert_array_almost_equal(w, np.pi * np.arange(9) / 8.) assert_array_almost_equal(h, np.ones(9)) for a in [1, np.ones(2)]: w, h = freqz(np.ones(2), a, worN=0, include_nyquist=True) assert_equal(w.shape, (0,)) assert_equal(h.shape, (0,)) assert_equal(h.dtype, np.dtype('complex128')) w1, h1 = freqz([1.0], worN=8, whole = True, include_nyquist=True) w2, h2 = freqz([1.0], worN=8, whole = True, include_nyquist=False) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) class TestSOSFreqz: def test_sosfreqz_basic(self): # Compare the results of freqz and sosfreqz for a low order # Butterworth filter. N = 500 b, a = butter(4, 0.2) sos = butter(4, 0.2, output='sos') w, h = freqz(b, a, worN=N) w2, h2 = sosfreqz(sos, worN=N) assert_equal(w2, w) assert_allclose(h2, h, rtol=1e-10, atol=1e-14) b, a = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass') sos = ellip(3, 1, 30, (0.2, 0.3), btype='bandpass', output='sos') w, h = freqz(b, a, worN=N) w2, h2 = sosfreqz(sos, worN=N) assert_equal(w2, w) assert_allclose(h2, h, rtol=1e-10, atol=1e-14) # must have at least one section assert_raises(ValueError, sosfreqz, sos[:0]) def test_sosfrez_design(self): # Compare sosfreqz output against expected values for different # filter types # from cheb2ord N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) sos = cheby2(N, 60, Wn, 'stop', output='sos') w, h = sosfreqz(sos) h = np.abs(h) w /= np.pi assert_allclose(20 * np.log10(h[w <= 0.1]), 0, atol=3.01) assert_allclose(20 * np.log10(h[w >= 0.6]), 0., atol=3.01) assert_allclose(h[(w >= 0.2) & (w <= 0.5)], 0., atol=1e-3) # <= -60 dB N, Wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 150) sos = cheby2(N, 150, Wn, 'stop', output='sos') w, h = sosfreqz(sos) dB = 20*np.log10(np.abs(h)) w /= np.pi assert_allclose(dB[w <= 0.1], 0, atol=3.01) assert_allclose(dB[w >= 0.6], 0., atol=3.01) assert_array_less(dB[(w >= 0.2) & (w <= 0.5)], -149.9) # from cheb1ord N, Wn = cheb1ord(0.2, 0.3, 3, 40) sos = cheby1(N, 3, Wn, 'low', output='sos') w, h = sosfreqz(sos) h = np.abs(h) w /= np.pi assert_allclose(20 * np.log10(h[w <= 0.2]), 0, atol=3.01) assert_allclose(h[w >= 0.3], 0., atol=1e-2) # <= -40 dB N, Wn = cheb1ord(0.2, 0.3, 1, 150) sos = cheby1(N, 1, Wn, 'low', output='sos') w, h = sosfreqz(sos) dB = 20*np.log10(np.abs(h)) w /= np.pi assert_allclose(dB[w <= 0.2], 0, atol=1.01) assert_array_less(dB[w >= 0.3], -149.9) # adapted from ellipord N, Wn = ellipord(0.3, 0.2, 3, 60) sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') w, h = sosfreqz(sos) h = np.abs(h) w /= np.pi assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01) assert_allclose(h[w <= 0.1], 0., atol=1.5e-3) # <= -60 dB (approx) # adapted from buttord N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 40) sos = butter(N, Wn, 'band', output='sos') w, h = sosfreqz(sos) h = np.abs(h) w /= np.pi assert_allclose(h[w <= 0.14], 0., atol=1e-2) # <= -40 dB 
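# The linear atol values above encode the stop-band depth in dB:
# an amplitude of 1e-2 is -40 dB and 1e-3 is -60 dB (a sketch):
assert_allclose(20 * np.log10(np.array([1e-2, 1e-3])), [-40., -60.])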
assert_allclose(h[w >= 0.6], 0., atol=1e-2) # <= -40 dB assert_allclose(20 * np.log10(h[(w >= 0.2) & (w <= 0.5)]), 0, atol=3.01) N, Wn = buttord([0.2, 0.5], [0.14, 0.6], 3, 100) sos = butter(N, Wn, 'band', output='sos') w, h = sosfreqz(sos) dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) w /= np.pi assert_array_less(dB[(w > 0) & (w <= 0.14)], -99.9) assert_array_less(dB[w >= 0.6], -99.9) assert_allclose(dB[(w >= 0.2) & (w <= 0.5)], 0, atol=3.01) def test_sosfreqz_design_ellip(self): N, Wn = ellipord(0.3, 0.1, 3, 60) sos = ellip(N, 0.3, 60, Wn, 'high', output='sos') w, h = sosfreqz(sos) h = np.abs(h) w /= np.pi assert_allclose(20 * np.log10(h[w >= 0.3]), 0, atol=3.01) assert_allclose(h[w <= 0.1], 0., atol=1.5e-3) # <= -60 dB (approx) N, Wn = ellipord(0.3, 0.2, .5, 150) sos = ellip(N, .5, 150, Wn, 'high', output='sos') w, h = sosfreqz(sos) dB = 20*np.log10(np.maximum(np.abs(h), 1e-10)) w /= np.pi assert_allclose(dB[w >= 0.3], 0, atol=.55) # Allow some numerical slop in the upper bound -150, so this is # a check that dB[w <= 0.2] is less than or almost equal to -150. assert dB[w <= 0.2].max() < -150*(1 - 1e-12) @mpmath_check("0.10") def test_sos_freqz_against_mp(self): # Compare the result of sosfreqz applied to a high order Butterworth # filter against the result computed using mpmath. (signal.freqz fails # miserably with such high order filters.) from . import mpsig N = 500 order = 25 Wn = 0.15 with mpmath.workdps(80): z_mp, p_mp, k_mp = mpsig.butter_lp(order, Wn) w_mp, h_mp = mpsig.zpkfreqz(z_mp, p_mp, k_mp, N) w_mp = np.array([float(x) for x in w_mp]) h_mp = np.array([complex(x) for x in h_mp]) sos = butter(order, Wn, output='sos') w, h = sosfreqz(sos, worN=N) assert_allclose(w, w_mp, rtol=1e-12, atol=1e-14) assert_allclose(h, h_mp, rtol=1e-12, atol=1e-14) def test_fs_param(self): fs = 900 sos = [[0.03934683014103762, 0.07869366028207524, 0.03934683014103762, 1.0, -0.37256600288916636, 0.0], [1.0, 1.0, 0.0, 1.0, -0.9495739996946778, 0.45125966317124144]] # N = None, whole=False w1, h1 = sosfreqz(sos, fs=fs) w2, h2 = sosfreqz(sos) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False)) # N = None, whole=True w1, h1 = sosfreqz(sos, whole=True, fs=fs) w2, h2 = sosfreqz(sos, whole=True) assert_allclose(h1, h2, atol=1e-27) assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False)) # N = 5, whole=False w1, h1 = sosfreqz(sos, 5, fs=fs) w2, h2 = sosfreqz(sos, 5) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False)) # N = 5, whole=True w1, h1 = sosfreqz(sos, 5, whole=True, fs=fs) w2, h2 = sosfreqz(sos, 5, whole=True) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False)) # w is an array_like for w in ([123], (123,), np.array([123]), (50, 123, 230), np.array([50, 123, 230])): w1, h1 = sosfreqz(sos, w, fs=fs) w2, h2 = sosfreqz(sos, 2*pi*np.array(w)/fs) assert_allclose(h1, h2) assert_allclose(w, w1) def test_w_or_N_types(self): # Measure at 7 (polyval) or 8 (fft) equally-spaced points for N in (7, np.int8(7), np.int16(7), np.int32(7), np.int64(7), np.array(7), 8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N) assert_array_almost_equal(w, np.pi * np.arange(N) / N) assert_array_almost_equal(h, np.ones(N)) w, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=N, fs=100) assert_array_almost_equal(w, np.linspace(0, 50, N, endpoint=False)) assert_array_almost_equal(h, np.ones(N)) # Measure at frequency 8 Hz for w in (8.0, 8.0+0j): # Only makes sense when fs is 
specified w_out, h = sosfreqz([1, 0, 0, 1, 0, 0], worN=w, fs=100) assert_array_almost_equal(w_out, [8]) assert_array_almost_equal(h, [1]) class TestFreqz_zpk: def test_ticket1441(self): """Regression test for ticket 1441.""" # Because freqz previously used arange instead of linspace, # when N was large, it would return one more point than # requested. N = 100000 w, h = freqz_zpk([0.5], [0.5], 1.0, worN=N) assert_equal(w.shape, (N,)) def test_basic(self): w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8) assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8) assert_array_almost_equal(h, np.ones(8)) def test_basic_whole(self): w, h = freqz_zpk([0.5], [0.5], 1.0, worN=8, whole=True) assert_array_almost_equal(w, 2 * np.pi * np.arange(8.0) / 8) assert_array_almost_equal(h, np.ones(8)) def test_vs_freqz(self): b, a = cheby1(4, 5, 0.5, analog=False, output='ba') z, p, k = cheby1(4, 5, 0.5, analog=False, output='zpk') w1, h1 = freqz(b, a) w2, h2 = freqz_zpk(z, p, k) assert_allclose(w1, w2) assert_allclose(h1, h2, rtol=1e-6) def test_backward_compat(self): # For backward compatibility, test if None act as a wrapper for default w1, h1 = freqz_zpk([0.5], [0.5], 1.0) w2, h2 = freqz_zpk([0.5], [0.5], 1.0, None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) def test_fs_param(self): fs = 900 z = [-1, -1, -1] p = [0.4747869998473389+0.4752230717749344j, 0.37256600288916636, 0.4747869998473389-0.4752230717749344j] k = 0.03934683014103762 # N = None, whole=False w1, h1 = freqz_zpk(z, p, k, whole=False, fs=fs) w2, h2 = freqz_zpk(z, p, k, whole=False) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs/2, 512, endpoint=False)) # N = None, whole=True w1, h1 = freqz_zpk(z, p, k, whole=True, fs=fs) w2, h2 = freqz_zpk(z, p, k, whole=True) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs, 512, endpoint=False)) # N = 5, whole=False w1, h1 = freqz_zpk(z, p, k, 5, fs=fs) w2, h2 = freqz_zpk(z, p, k, 5) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs/2, 5, endpoint=False)) # N = 5, whole=True w1, h1 = freqz_zpk(z, p, k, 5, whole=True, fs=fs) w2, h2 = freqz_zpk(z, p, k, 5, whole=True) assert_allclose(h1, h2) assert_allclose(w1, np.linspace(0, fs, 5, endpoint=False)) # w is an array_like for w in ([123], (123,), np.array([123]), (50, 123, 230), np.array([50, 123, 230])): w1, h1 = freqz_zpk(z, p, k, w, fs=fs) w2, h2 = freqz_zpk(z, p, k, 2*pi*np.array(w)/fs) assert_allclose(h1, h2) assert_allclose(w, w1) def test_w_or_N_types(self): # Measure at 8 equally-spaced points for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = freqz_zpk([], [], 1, worN=N) assert_array_almost_equal(w, np.pi * np.arange(8) / 8.) assert_array_almost_equal(h, np.ones(8)) w, h = freqz_zpk([], [], 1, worN=N, fs=100) assert_array_almost_equal(w, np.linspace(0, 50, 8, endpoint=False)) assert_array_almost_equal(h, np.ones(8)) # Measure at frequency 8 Hz for w in (8.0, 8.0+0j): # Only makes sense when fs is specified w_out, h = freqz_zpk([], [], 1, worN=w, fs=100) assert_array_almost_equal(w_out, [8]) assert_array_almost_equal(h, [1]) class TestNormalize: def test_allclose(self): """Test for false positive on allclose in normalize() in filter_design.py""" # Test to make sure the allclose call within signal.normalize does not # choose false positives. Then check against a known output from MATLAB # to make sure the fix doesn't break anything. # These are the coefficients returned from # `[b,a] = cheby1(8, 0.5, 0.048)' # in MATLAB. 
There are at least 15 significant figures in each # coefficient, so it makes sense to test for errors on the order of # 1e-13 (this can always be relaxed if different platforms have # different rounding errors) b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10, 6.022052805239190e-10, 1.204410561047838e-09, 1.505513201309798e-09, 1.204410561047838e-09, 6.022052805239190e-10, 1.720586515782626e-10, 2.150733144728282e-11]) a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00, 2.654354569747454e+01, -5.182182531666387e+01, 6.334127355102684e+01, -4.963358186631157e+01, 2.434862182949389e+01, -6.836925348604676e+00, 8.412934944449140e-01]) # This is the input to signal.normalize after passing through the # equivalent steps in signal.iirfilter as was done for MATLAB b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05, 4.3520780422820447e-05, 8.7041560845640893e-05, 1.0880195105705122e-04, 8.7041560845640975e-05, 4.3520780422820447e-05, 1.2434508692234413e-05, 1.5543135865293012e-06]) a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05, 1.9182761917308895e+06, -3.7451128364682454e+06, 4.5776121393762771e+06, -3.5869706138592605e+06, 1.7596511818472347e+06, -4.9409793515707983e+05, 6.0799461347219651e+04]) b_output, a_output = normalize(b_norm_in, a_norm_in) # The test on b works for decimal=14 but the one for a does not. For # the sake of consistency, both of these are decimal=13. If something # breaks on another platform, it is probably fine to relax this lower. assert_array_almost_equal(b_matlab, b_output, decimal=13) assert_array_almost_equal(a_matlab, a_output, decimal=13) def test_errors(self): """Test the error cases.""" # all zero denominator assert_raises(ValueError, normalize, [1, 2], 0) # denominator not 1 dimensional assert_raises(ValueError, normalize, [1, 2], [[1]]) # numerator too many dimensions assert_raises(ValueError, normalize, [[[1, 2]]], 1) class TestLp2lp: def test_basic(self): b = [1] a = [1, np.sqrt(2), 1] b_lp, a_lp = lp2lp(b, a, 0.38574256627112119) assert_array_almost_equal(b_lp, [0.1488], decimal=4) assert_array_almost_equal(a_lp, [1, 0.5455, 0.1488], decimal=4) class TestLp2hp: def test_basic(self): b = [0.25059432325190018] a = [1, 0.59724041654134863, 0.92834805757524175, 0.25059432325190018] b_hp, a_hp = lp2hp(b, a, 2*np.pi*5000) assert_allclose(b_hp, [1, 0, 0, 0]) assert_allclose(a_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14], rtol=1e-4) class TestLp2bp: def test_basic(self): b = [1] a = [1, 2, 2, 1] b_bp, a_bp = lp2bp(b, a, 2*np.pi*4000, 2*np.pi*2000) assert_allclose(b_bp, [1.9844e12, 0, 0, 0], rtol=1e-6) assert_allclose(a_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13, 1.3965e18, 1.0028e22, 2.5202e26], rtol=1e-4) class TestLp2bs: def test_basic(self): b = [1] a = [1, 1] b_bs, a_bs = lp2bs(b, a, 0.41722257286366754, 0.18460575326152251) assert_array_almost_equal(b_bs, [1, 0, 0.17407], decimal=5) assert_array_almost_equal(a_bs, [1, 0.18461, 0.17407], decimal=5) class TestBilinear: def test_basic(self): b = [0.14879732743343033] a = [1, 0.54552236880522209, 0.14879732743343033] b_z, a_z = bilinear(b, a, 0.5) assert_array_almost_equal(b_z, [0.087821, 0.17564, 0.087821], decimal=5) assert_array_almost_equal(a_z, [1, -1.0048, 0.35606], decimal=4) b = [1, 0, 0.17407467530697837] a = [1, 0.18460575326152251, 0.17407467530697837] b_z, a_z = bilinear(b, a, 0.5) assert_array_almost_equal(b_z, [0.86413, -1.2158, 0.86413], decimal=4) assert_array_almost_equal(a_z, [1, -1.2158, 0.72826], decimal=4) class TestLp2lp_zpk: def 
test_basic(self): z = [] p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] k = 1 z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 5) assert_array_equal(z_lp, []) assert_allclose(sort(p_lp), sort(p)*5) assert_allclose(k_lp, 25) # Pseudo-Chebyshev with both poles and zeros z = [-2j, +2j] p = [-0.75, -0.5-0.5j, -0.5+0.5j] k = 3 z_lp, p_lp, k_lp = lp2lp_zpk(z, p, k, 20) assert_allclose(sort(z_lp), sort([-40j, +40j])) assert_allclose(sort(p_lp), sort([-15, -10-10j, -10+10j])) assert_allclose(k_lp, 60) class TestLp2hp_zpk: def test_basic(self): z = [] p = [(-1+1j)/np.sqrt(2), (-1-1j)/np.sqrt(2)] k = 1 z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 5) assert_array_equal(z_hp, [0, 0]) assert_allclose(sort(p_hp), sort(p)*5) assert_allclose(k_hp, 1) z = [-2j, +2j] p = [-0.75, -0.5-0.5j, -0.5+0.5j] k = 3 z_hp, p_hp, k_hp = lp2hp_zpk(z, p, k, 6) assert_allclose(sort(z_hp), sort([-3j, 0, +3j])) assert_allclose(sort(p_hp), sort([-8, -6-6j, -6+6j])) assert_allclose(k_hp, 32) class TestLp2bp_zpk: def test_basic(self): z = [-2j, +2j] p = [-0.75, -0.5-0.5j, -0.5+0.5j] k = 3 z_bp, p_bp, k_bp = lp2bp_zpk(z, p, k, 15, 8) assert_allclose(sort(z_bp), sort([-25j, -9j, 0, +9j, +25j])) assert_allclose(sort(p_bp), sort([-3 + 6j*sqrt(6), -3 - 6j*sqrt(6), +2j+sqrt(-8j-225)-2, -2j+sqrt(+8j-225)-2, +2j-sqrt(-8j-225)-2, -2j-sqrt(+8j-225)-2, ])) assert_allclose(k_bp, 24) class TestLp2bs_zpk: def test_basic(self): z = [-2j, +2j] p = [-0.75, -0.5-0.5j, -0.5+0.5j] k = 3 z_bs, p_bs, k_bs = lp2bs_zpk(z, p, k, 35, 12) assert_allclose(sort(z_bs), sort([+35j, -35j, +3j+sqrt(1234)*1j, -3j+sqrt(1234)*1j, +3j-sqrt(1234)*1j, -3j-sqrt(1234)*1j])) assert_allclose(sort(p_bs), sort([+3j*sqrt(129) - 8, -3j*sqrt(129) - 8, (-6 + 6j) - sqrt(-1225 - 72j), (-6 - 6j) - sqrt(-1225 + 72j), (-6 + 6j) + sqrt(-1225 - 72j), (-6 - 6j) + sqrt(-1225 + 72j), ])) assert_allclose(k_bs, 32) class TestBilinear_zpk: def test_basic(self): z = [-2j, +2j] p = [-0.75, -0.5-0.5j, -0.5+0.5j] k = 3 z_d, p_d, k_d = bilinear_zpk(z, p, k, 10) assert_allclose(sort(z_d), sort([(20-2j)/(20+2j), (20+2j)/(20-2j), -1])) assert_allclose(sort(p_d), sort([77/83, (1j/2 + 39/2) / (41/2 - 1j/2), (39/2 - 1j/2) / (1j/2 + 41/2), ])) assert_allclose(k_d, 9696/69803) class TestPrototypeType: def test_output_type(self): # Prototypes should consistently output arrays, not lists # https://github.com/scipy/scipy/pull/441 for func in (buttap, besselap, lambda N: cheb1ap(N, 1), lambda N: cheb2ap(N, 20), lambda N: ellipap(N, 1, 20)): for N in range(7): z, p, k = func(N) assert_(isinstance(z, np.ndarray)) assert_(isinstance(p, np.ndarray)) def dB(x): # Return magnitude in decibels, avoiding divide-by-zero warnings # (and deal with some "not less-ordered" errors when -inf shows up) return 20 * np.log10(np.maximum(np.abs(x), np.finfo(np.float64).tiny)) class TestButtord: def test_lowpass(self): wp = 0.2 ws = 0.3 rp = 3 rs = 60 N, Wn = buttord(wp, ws, rp, rs, False) b, a = butter(N, Wn, 'lowpass', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs) assert_equal(N, 16) assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15) def test_highpass(self): wp = 0.3 ws = 0.2 rp = 3 rs = 70 N, Wn = buttord(wp, ws, rp, rs, False) b, a = butter(N, Wn, 'highpass', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp, dB(h[wp <= w])) assert_array_less(dB(h[w <= ws]), -rs) assert_equal(N, 18) assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15) def test_bandpass(self): wp = [0.2, 0.5] ws = [0.1, 0.6] rp = 3 rs = 80 N, Wn = buttord(wp, ws, rp, rs, False) b, a = 
butter(N, Wn, 'bandpass', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), -rs + 0.1) assert_equal(N, 18) assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01], rtol=1e-15) def test_bandstop(self): wp = [0.1, 0.6] ws = [0.2, 0.5] rp = 3 rs = 90 N, Wn = buttord(wp, ws, rp, rs, False) b, a = butter(N, Wn, 'bandstop', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp, dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), -rs) assert_equal(N, 20) assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01], rtol=1e-6) def test_analog(self): wp = 200 ws = 600 rp = 3 rs = 60 N, Wn = buttord(wp, ws, rp, rs, True) b, a = butter(N, Wn, 'lowpass', True) w, h = freqs(b, a) assert_array_less(-rp, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs) assert_equal(N, 7) assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15) n, Wn = buttord(1, 550/450, 1, 26, analog=True) assert_equal(n, 19) assert_allclose(Wn, 1.0361980524629517, rtol=1e-15) assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55) def test_fs_param(self): wp = [4410, 11025] ws = [2205, 13230] rp = 3 rs = 80 fs = 44100 N, Wn = buttord(wp, ws, rp, rs, False, fs=fs) b, a = butter(N, Wn, 'bandpass', False, fs=fs) w, h = freqz(b, a, fs=fs) assert_array_less(-rp - 0.1, dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), -rs + 0.1) assert_equal(N, 18) assert_allclose(Wn, [4409.722701715714, 11025.47178084662], rtol=1e-15) def test_invalid_input(self): with pytest.raises(ValueError) as exc_info: buttord([20, 50], [14, 60], 3, 2) assert "gpass should be smaller than gstop" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: buttord([20, 50], [14, 60], -1, 2) assert "gpass should be larger than 0.0" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: buttord([20, 50], [14, 60], 1, -2) assert "gstop should be larger than 0.0" in str(exc_info.value) def test_runtime_warnings(self): with pytest.warns(RuntimeWarning, match=r'Order is zero'): buttord(0.0, 1.0, 3, 60) def test_ellip_butter(self): # The purpose of the test is to compare to some known output from past # scipy versions. 
The values to compare to are generated with scipy # 1.9.1 (there is nothing special about this particular version though) n, wn = buttord([0.1, 0.6], [0.2, 0.5], 3, 60) assert n == 14 class TestCheb1ord: def test_lowpass(self): wp = 0.2 ws = 0.3 rp = 3 rs = 60 N, Wn = cheb1ord(wp, ws, rp, rs, False) b, a = cheby1(N, rp, Wn, 'low', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs + 0.1) assert_equal(N, 8) assert_allclose(Wn, 0.2, rtol=1e-15) def test_highpass(self): wp = 0.3 ws = 0.2 rp = 3 rs = 70 N, Wn = cheb1ord(wp, ws, rp, rs, False) b, a = cheby1(N, rp, Wn, 'high', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[wp <= w])) assert_array_less(dB(h[w <= ws]), -rs + 0.1) assert_equal(N, 9) assert_allclose(Wn, 0.3, rtol=1e-15) def test_bandpass(self): wp = [0.2, 0.5] ws = [0.1, 0.6] rp = 3 rs = 80 N, Wn = cheb1ord(wp, ws, rp, rs, False) b, a = cheby1(N, rp, Wn, 'band', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), -rs + 0.1) assert_equal(N, 9) assert_allclose(Wn, [0.2, 0.5], rtol=1e-15) def test_bandstop(self): wp = [0.1, 0.6] ws = [0.2, 0.5] rp = 3 rs = 90 N, Wn = cheb1ord(wp, ws, rp, rs, False) b, a = cheby1(N, rp, Wn, 'stop', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), -rs + 0.1) assert_equal(N, 10) assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5) def test_analog(self): wp = 700 ws = 100 rp = 3 rs = 70 N, Wn = cheb1ord(wp, ws, rp, rs, True) b, a = cheby1(N, rp, Wn, 'high', True) w, h = freqs(b, a) assert_array_less(-rp - 0.1, dB(h[wp <= w])) assert_array_less(dB(h[w <= ws]), -rs + 0.1) assert_equal(N, 4) assert_allclose(Wn, 700, rtol=1e-15) assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17) def test_fs_param(self): wp = 4800 ws = 7200 rp = 3 rs = 60 fs = 48000 N, Wn = cheb1ord(wp, ws, rp, rs, False, fs=fs) b, a = cheby1(N, rp, Wn, 'low', False, fs=fs) w, h = freqz(b, a, fs=fs) assert_array_less(-rp - 0.1, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs + 0.1) assert_equal(N, 8) assert_allclose(Wn, 4800, rtol=1e-15) def test_invalid_input(self): with pytest.raises(ValueError) as exc_info: cheb1ord(0.2, 0.3, 3, 2) assert "gpass should be smaller than gstop" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: cheb1ord(0.2, 0.3, -1, 2) assert "gpass should be larger than 0.0" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: cheb1ord(0.2, 0.3, 1, -2) assert "gstop should be larger than 0.0" in str(exc_info.value) def test_ellip_cheb1(self): # The purpose of the test is to compare to some known output from past # scipy versions. 
The values to compare to are generated with scipy # 1.9.1 (there is nothing special about this particular version though) n, wn = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) assert n == 7 n2, w2 = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) assert not (wn == w2).all() class TestCheb2ord: def test_lowpass(self): wp = 0.2 ws = 0.3 rp = 3 rs = 60 N, Wn = cheb2ord(wp, ws, rp, rs, False) b, a = cheby2(N, rs, Wn, 'lp', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs + 0.1) assert_equal(N, 8) assert_allclose(Wn, 0.28647639976553163, rtol=1e-15) def test_highpass(self): wp = 0.3 ws = 0.2 rp = 3 rs = 70 N, Wn = cheb2ord(wp, ws, rp, rs, False) b, a = cheby2(N, rs, Wn, 'hp', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[wp <= w])) assert_array_less(dB(h[w <= ws]), -rs + 0.1) assert_equal(N, 9) assert_allclose(Wn, 0.20697492182903282, rtol=1e-15) def test_bandpass(self): wp = [0.2, 0.5] ws = [0.1, 0.6] rp = 3 rs = 80 N, Wn = cheb2ord(wp, ws, rp, rs, False) b, a = cheby2(N, rs, Wn, 'bp', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), -rs + 0.1) assert_equal(N, 9) assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482], rtol=1e-15) def test_bandstop(self): wp = [0.1, 0.6] ws = [0.2, 0.5] rp = 3 rs = 90 N, Wn = cheb2ord(wp, ws, rp, rs, False) b, a = cheby2(N, rs, Wn, 'bs', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), -rs + 0.1) assert_equal(N, 10) assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362], rtol=1e-6) def test_analog(self): wp = [20, 50] ws = [10, 60] rp = 3 rs = 80 N, Wn = cheb2ord(wp, ws, rp, rs, True) b, a = cheby2(N, rs, Wn, 'bp', True) w, h = freqs(b, a) assert_array_less(-rp - 0.1, dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), -rs + 0.1) assert_equal(N, 11) assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01], rtol=1e-15) def test_fs_param(self): wp = 150 ws = 100 rp = 3 rs = 70 fs = 1000 N, Wn = cheb2ord(wp, ws, rp, rs, False, fs=fs) b, a = cheby2(N, rs, Wn, 'hp', False, fs=fs) w, h = freqz(b, a, fs=fs) assert_array_less(-rp - 0.1, dB(h[wp <= w])) assert_array_less(dB(h[w <= ws]), -rs + 0.1) assert_equal(N, 9) assert_allclose(Wn, 103.4874609145164, rtol=1e-15) def test_invalid_input(self): with pytest.raises(ValueError) as exc_info: cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 2) assert "gpass should be smaller than gstop" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: cheb2ord([0.1, 0.6], [0.2, 0.5], -1, 2) assert "gpass should be larger than 0.0" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: cheb2ord([0.1, 0.6], [0.2, 0.5], 1, -2) assert "gstop should be larger than 0.0" in str(exc_info.value) def test_ellip_cheb2(self): # The purpose of the test is to compare to some known output from past # scipy versions. 
The values to compare to are generated with scipy # 1.9.1 (there is nothing special about this particular version though) n, wn = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) assert n == 7 n1, w1 = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) assert not (wn == w1).all() class TestEllipord: def test_lowpass(self): wp = 0.2 ws = 0.3 rp = 3 rs = 60 N, Wn = ellipord(wp, ws, rp, rs, False) b, a = ellip(N, rp, rs, Wn, 'lp', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs + 0.1) assert_equal(N, 5) assert_allclose(Wn, 0.2, rtol=1e-15) def test_lowpass_1000dB(self): # failed when ellipkm1 wasn't used in ellipord and ellipap wp = 0.2 ws = 0.3 rp = 3 rs = 1000 N, Wn = ellipord(wp, ws, rp, rs, False) sos = ellip(N, rp, rs, Wn, 'lp', False, output='sos') w, h = sosfreqz(sos) w /= np.pi assert_array_less(-rp - 0.1, dB(h[w <= wp])) assert_array_less(dB(h[ws <= w]), -rs + 0.1) def test_highpass(self): wp = 0.3 ws = 0.2 rp = 3 rs = 70 N, Wn = ellipord(wp, ws, rp, rs, False) b, a = ellip(N, rp, rs, Wn, 'hp', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[wp <= w])) assert_array_less(dB(h[w <= ws]), -rs + 0.1) assert_equal(N, 6) assert_allclose(Wn, 0.3, rtol=1e-15) def test_bandpass(self): wp = [0.2, 0.5] ws = [0.1, 0.6] rp = 3 rs = 80 N, Wn = ellipord(wp, ws, rp, rs, False) b, a = ellip(N, rp, rs, Wn, 'bp', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]), -rs + 0.1) assert_equal(N, 6) assert_allclose(Wn, [0.2, 0.5], rtol=1e-15) def test_bandstop(self): wp = [0.1, 0.6] ws = [0.2, 0.5] rp = 3 rs = 90 N, Wn = ellipord(wp, ws, rp, rs, False) b, a = ellip(N, rp, rs, Wn, 'bs', False) w, h = freqz(b, a) w /= np.pi assert_array_less(-rp - 0.1, dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), -rs + 0.1) assert_equal(N, 7) assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5) def test_analog(self): wp = [1000, 6000] ws = [2000, 5000] rp = 3 rs = 90 N, Wn = ellipord(wp, ws, rp, rs, True) b, a = ellip(N, rp, rs, Wn, 'bs', True) w, h = freqs(b, a) assert_array_less(-rp - 0.1, dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), -rs + 0.1) assert_equal(N, 8) assert_allclose(Wn, [1666.6666, 6000]) assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9) def test_fs_param(self): wp = [400, 2400] ws = [800, 2000] rp = 3 rs = 90 fs = 8000 N, Wn = ellipord(wp, ws, rp, rs, False, fs=fs) b, a = ellip(N, rp, rs, Wn, 'bs', False, fs=fs) w, h = freqz(b, a, fs=fs) assert_array_less(-rp - 0.1, dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]), -rs + 0.1) assert_equal(N, 7) assert_allclose(Wn, [590.3293117737195, 2400], rtol=1e-5) def test_invalid_input(self): with pytest.raises(ValueError) as exc_info: ellipord(0.2, 0.5, 3, 2) assert "gpass should be smaller than gstop" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: ellipord(0.2, 0.5, -1, 2) assert "gpass should be larger than 0.0" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: ellipord(0.2, 0.5, 1, -2) assert "gstop should be larger than 0.0" in str(exc_info.value) def test_ellip_butter(self): # The purpose of the test is to compare to some known output from past # scipy versions. 
The values to compare to are generated with scipy # 1.9.1 (there is nothing special about this particular version though) n, wn = ellipord([0.1, 0.6], [0.2, 0.5], 3, 60) assert n == 5 class TestBessel: def test_degenerate(self): for norm in ('delay', 'phase', 'mag'): # 0-order filter is just a passthrough b, a = bessel(0, 1, analog=True, norm=norm) assert_array_equal(b, [1]) assert_array_equal(a, [1]) # 1-order filter is same for all types b, a = bessel(1, 1, analog=True, norm=norm) assert_allclose(b, [1], rtol=1e-15) assert_allclose(a, [1, 1], rtol=1e-15) z, p, k = bessel(1, 0.3, analog=True, output='zpk', norm=norm) assert_array_equal(z, []) assert_allclose(p, [-0.3], rtol=1e-14) assert_allclose(k, 0.3, rtol=1e-14) def test_high_order(self): # high even order, 'phase' z, p, k = bessel(24, 100, analog=True, output='zpk') z2 = [] p2 = [ -9.055312334014323e+01 + 4.844005815403969e+00j, -8.983105162681878e+01 + 1.454056170018573e+01j, -8.837357994162065e+01 + 2.426335240122282e+01j, -8.615278316179575e+01 + 3.403202098404543e+01j, -8.312326467067703e+01 + 4.386985940217900e+01j, -7.921695461084202e+01 + 5.380628489700191e+01j, -7.433392285433246e+01 + 6.388084216250878e+01j, -6.832565803501586e+01 + 7.415032695116071e+01j, -6.096221567378025e+01 + 8.470292433074425e+01j, -5.185914574820616e+01 + 9.569048385258847e+01j, -4.027853855197555e+01 + 1.074195196518679e+02j, -2.433481337524861e+01 + 1.207298683731973e+02j, ] k2 = 9.999999999999989e+47 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) assert_allclose(k, k2, rtol=1e-14) # high odd order, 'phase' z, p, k = bessel(23, 1000, analog=True, output='zpk') z2 = [] p2 = [ -2.497697202208956e+02 + 1.202813187870698e+03j, -4.126986617510172e+02 + 1.065328794475509e+03j, -5.304922463809596e+02 + 9.439760364018479e+02j, -9.027564978975828e+02 + 1.010534334242318e+02j, -8.909283244406079e+02 + 2.023024699647598e+02j, -8.709469394347836e+02 + 3.039581994804637e+02j, -8.423805948131370e+02 + 4.062657947488952e+02j, -8.045561642249877e+02 + 5.095305912401127e+02j, -7.564660146766259e+02 + 6.141594859516342e+02j, -6.965966033906477e+02 + 7.207341374730186e+02j, -6.225903228776276e+02 + 8.301558302815096e+02j, -9.066732476324988e+02] k2 = 9.999999999999983e+68 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) assert_allclose(k, k2, rtol=1e-14) # high even order, 'delay' (Orchard 1965 "The Roots of the # Maximally Flat-Delay Polynomials" Table 1) z, p, k = bessel(31, 1, analog=True, output='zpk', norm='delay') p2 = [-20.876706, -20.826543 + 1.735732j, -20.675502 + 3.473320j, -20.421895 + 5.214702j, -20.062802 + 6.961982j, -19.593895 + 8.717546j, -19.009148 + 10.484195j, -18.300400 + 12.265351j, -17.456663 + 14.065350j, -16.463032 + 15.889910j, -15.298849 + 17.746914j, -13.934466 + 19.647827j, -12.324914 + 21.610519j, -10.395893 + 23.665701j, - 8.005600 + 25.875019j, - 4.792045 + 28.406037j, ] assert_allclose(sorted(p, key=np.imag), sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) # high odd order, 'delay' z, p, k = bessel(30, 1, analog=True, output='zpk', norm='delay') p2 = [-20.201029 + 0.867750j, -20.097257 + 2.604235j, -19.888485 + 4.343721j, -19.572188 + 6.088363j, -19.144380 + 7.840570j, -18.599342 + 9.603147j, -17.929195 + 11.379494j, -17.123228 + 13.173901j, -16.166808 + 14.992008j, -15.039580 + 16.841580j, -13.712245 + 18.733902j, -12.140295 + 20.686563j, -10.250119 + 22.729808j, - 7.901170 + 24.924391j, - 4.734679 
+ 27.435615j, ] assert_allclose(sorted(p, key=np.imag), sorted(np.union1d(p2, np.conj(p2)), key=np.imag)) def test_refs(self): # Compare to http://www.crbond.com/papers/bsf2.pdf # "Delay Normalized Bessel Polynomial Coefficients" bond_b = 10395 bond_a = [1, 21, 210, 1260, 4725, 10395, 10395] b, a = bessel(6, 1, norm='delay', analog=True) assert_allclose(bond_b, b) assert_allclose(bond_a, a) # "Delay Normalized Bessel Pole Locations" bond_poles = { 1: [-1.0000000000], 2: [-1.5000000000 + 0.8660254038j], 3: [-1.8389073227 + 1.7543809598j, -2.3221853546], 4: [-2.1037893972 + 2.6574180419j, -2.8962106028 + 0.8672341289j], 5: [-2.3246743032 + 3.5710229203j, -3.3519563992 + 1.7426614162j, -3.6467385953], 6: [-2.5159322478 + 4.4926729537j, -3.7357083563 + 2.6262723114j, -4.2483593959 + 0.8675096732j], 7: [-2.6856768789 + 5.4206941307j, -4.0701391636 + 3.5171740477j, -4.7582905282 + 1.7392860611j, -4.9717868585], 8: [-2.8389839489 + 6.3539112986j, -4.3682892172 + 4.4144425005j, -5.2048407906 + 2.6161751526j, -5.5878860433 + 0.8676144454j], 9: [-2.9792607982 + 7.2914636883j, -4.6384398872 + 5.3172716754j, -5.6044218195 + 3.4981569179j, -6.1293679043 + 1.7378483835j, -6.2970191817], 10: [-3.1089162336 + 8.2326994591j, -4.8862195669 + 6.2249854825j, -5.9675283286 + 4.3849471889j, -6.6152909655 + 2.6115679208j, -6.9220449054 + 0.8676651955j] } for N in range(1, 11): p1 = np.sort(bond_poles[N]) p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'delay')[1]))) assert_array_almost_equal(p1, p2, decimal=10) # "Frequency Normalized Bessel Pole Locations" bond_poles = { 1: [-1.0000000000], 2: [-1.1016013306 + 0.6360098248j], 3: [-1.0474091610 + 0.9992644363j, -1.3226757999], 4: [-0.9952087644 + 1.2571057395j, -1.3700678306 + 0.4102497175j], 5: [-0.9576765486 + 1.4711243207j, -1.3808773259 + 0.7179095876j, -1.5023162714], 6: [-0.9306565229 + 1.6618632689j, -1.3818580976 + 0.9714718907j, -1.5714904036 + 0.3208963742j], 7: [-0.9098677806 + 1.8364513530j, -1.3789032168 + 1.1915667778j, -1.6120387662 + 0.5892445069j, -1.6843681793], 8: [-0.8928697188 + 1.9983258436j, -1.3738412176 + 1.3883565759j, -1.6369394181 + 0.8227956251j, -1.7574084004 + 0.2728675751j], 9: [-0.8783992762 + 2.1498005243j, -1.3675883098 + 1.5677337122j, -1.6523964846 + 1.0313895670j, -1.8071705350 + 0.5123837306j, -1.8566005012], 10: [-0.8657569017 + 2.2926048310j, -1.3606922784 + 1.7335057427j, -1.6618102414 + 1.2211002186j, -1.8421962445 + 0.7272575978j, -1.9276196914 + 0.2416234710j] } for N in range(1, 11): p1 = np.sort(bond_poles[N]) p2 = np.sort(np.concatenate(_cplxreal(besselap(N, 'mag')[1]))) assert_array_almost_equal(p1, p2, decimal=10) # Compare to https://www.ranecommercial.com/legacy/note147.html # "Table 1 - Bessel Crossovers of Second, Third, and Fourth-Order" a = [1, 1, 1/3] b2, a2 = bessel(2, 1, norm='delay', analog=True) assert_allclose(a[::-1], a2/b2) a = [1, 1, 2/5, 1/15] b2, a2 = bessel(3, 1, norm='delay', analog=True) assert_allclose(a[::-1], a2/b2) a = [1, 1, 9/21, 2/21, 1/105] b2, a2 = bessel(4, 1, norm='delay', analog=True) assert_allclose(a[::-1], a2/b2) a = [1, np.sqrt(3), 1] b2, a2 = bessel(2, 1, norm='phase', analog=True) assert_allclose(a[::-1], a2/b2) # TODO: Why so inaccurate? Is reference flawed? a = [1, 2.481, 2.463, 1.018] b2, a2 = bessel(3, 1, norm='phase', analog=True) assert_array_almost_equal(a[::-1], a2/b2, decimal=1) # TODO: Why so inaccurate? Is reference flawed? 
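# For comparison with the rounded reference rows above, the delay-normalized
# denominator is the reverse Bessel polynomial itself, e.g.
# theta_3(s) = s**3 + 6*s**2 + 15*s + 15 (a sketch reusing the bessel import):
b_chk, a_chk = bessel(3, 1, norm='delay', analog=True)
assert_allclose(b_chk, [15])
assert_allclose(a_chk, [1, 6, 15, 15])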
a = [1, 3.240, 4.5, 3.240, 1.050] b2, a2 = bessel(4, 1, norm='phase', analog=True) assert_array_almost_equal(a[::-1], a2/b2, decimal=1) # Table of -3 dB factors: N, scale = 2, 1.272 scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] assert_array_almost_equal(scale, scale2, decimal=3) # TODO: Why so inaccurate? Is reference flawed? N, scale = 3, 1.413 scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] assert_array_almost_equal(scale, scale2, decimal=2) # TODO: Why so inaccurate? Is reference flawed? N, scale = 4, 1.533 scale2 = besselap(N, 'mag')[1] / besselap(N, 'phase')[1] assert_array_almost_equal(scale, scale2, decimal=1) def test_hardcoded(self): # Compare to values from original hardcoded implementation originals = { 0: [], 1: [-1], 2: [-.8660254037844386467637229 + .4999999999999999999999996j], 3: [-.9416000265332067855971980, -.7456403858480766441810907 + .7113666249728352680992154j], 4: [-.6572111716718829545787788 + .8301614350048733772399715j, -.9047587967882449459642624 + .2709187330038746636700926j], 5: [-.9264420773877602247196260, -.8515536193688395541722677 + .4427174639443327209850002j, -.5905759446119191779319432 + .9072067564574549539291747j], 6: [-.9093906830472271808050953 + .1856964396793046769246397j, -.7996541858328288520243325 + .5621717346937317988594118j, -.5385526816693109683073792 + .9616876881954277199245657j], 7: [-.9194871556490290014311619, -.8800029341523374639772340 + .3216652762307739398381830j, -.7527355434093214462291616 + .6504696305522550699212995j, -.4966917256672316755024763 + 1.002508508454420401230220j], 8: [-.9096831546652910216327629 + .1412437976671422927888150j, -.8473250802359334320103023 + .4259017538272934994996429j, -.7111381808485399250796172 + .7186517314108401705762571j, -.4621740412532122027072175 + 1.034388681126901058116589j], 9: [-.9154957797499037686769223, -.8911217017079759323183848 + .2526580934582164192308115j, -.8148021112269012975514135 + .5085815689631499483745341j, -.6743622686854761980403401 + .7730546212691183706919682j, -.4331415561553618854685942 + 1.060073670135929666774323j], 10: [-.9091347320900502436826431 + .1139583137335511169927714j, -.8688459641284764527921864 + .3430008233766309973110589j, -.7837694413101441082655890 + .5759147538499947070009852j, -.6417513866988316136190854 + .8175836167191017226233947j, -.4083220732868861566219785 + 1.081274842819124562037210j], 11: [-.9129067244518981934637318, -.8963656705721166099815744 + .2080480375071031919692341j, -.8453044014712962954184557 + .4178696917801248292797448j, -.7546938934722303128102142 + .6319150050721846494520941j, -.6126871554915194054182909 + .8547813893314764631518509j, -.3868149510055090879155425 + 1.099117466763120928733632j], 12: [-.9084478234140682638817772 + 95506365213450398415258360e-27j, -.8802534342016826507901575 + .2871779503524226723615457j, -.8217296939939077285792834 + .4810212115100676440620548j, -.7276681615395159454547013 + .6792961178764694160048987j, -.5866369321861477207528215 + .8863772751320727026622149j, -.3679640085526312839425808 + 1.114373575641546257595657j], 13: [-.9110914665984182781070663, -.8991314665475196220910718 + .1768342956161043620980863j, -.8625094198260548711573628 + .3547413731172988997754038j, -.7987460692470972510394686 + .5350752120696801938272504j, -.7026234675721275653944062 + .7199611890171304131266374j, -.5631559842430199266325818 + .9135900338325109684927731j, -.3512792323389821669401925 + 1.127591548317705678613239j], 14: [-.9077932138396487614720659 + 82196399419401501888968130e-27j, 
-.8869506674916445312089167 + .2470079178765333183201435j, -.8441199160909851197897667 + .4131653825102692595237260j, -.7766591387063623897344648 + .5819170677377608590492434j, -.6794256425119233117869491 + .7552857305042033418417492j, -.5418766775112297376541293 + .9373043683516919569183099j, -.3363868224902037330610040 + 1.139172297839859991370924j], 15: [-.9097482363849064167228581, -.9006981694176978324932918 + .1537681197278439351298882j, -.8731264620834984978337843 + .3082352470564267657715883j, -.8256631452587146506294553 + .4642348752734325631275134j, -.7556027168970728127850416 + .6229396358758267198938604j, -.6579196593110998676999362 + .7862895503722515897065645j, -.5224954069658330616875186 + .9581787261092526478889345j, -.3229963059766444287113517 + 1.149416154583629539665297j], 16: [-.9072099595087001356491337 + 72142113041117326028823950e-27j, -.8911723070323647674780132 + .2167089659900576449410059j, -.8584264231521330481755780 + .3621697271802065647661080j, -.8074790293236003885306146 + .5092933751171800179676218j, -.7356166304713115980927279 + .6591950877860393745845254j, -.6379502514039066715773828 + .8137453537108761895522580j, -.5047606444424766743309967 + .9767137477799090692947061j, -.3108782755645387813283867 + 1.158552841199330479412225j], 17: [-.9087141161336397432860029, -.9016273850787285964692844 + .1360267995173024591237303j, -.8801100704438627158492165 + .2725347156478803885651973j, -.8433414495836129204455491 + .4100759282910021624185986j, -.7897644147799708220288138 + .5493724405281088674296232j, -.7166893842372349049842743 + .6914936286393609433305754j, -.6193710717342144521602448 + .8382497252826992979368621j, -.4884629337672704194973683 + .9932971956316781632345466j, -.2998489459990082015466971 + 1.166761272925668786676672j], 18: [-.9067004324162775554189031 + 64279241063930693839360680e-27j, -.8939764278132455733032155 + .1930374640894758606940586j, -.8681095503628830078317207 + .3224204925163257604931634j, -.8281885016242836608829018 + .4529385697815916950149364j, -.7726285030739558780127746 + .5852778162086640620016316j, -.6987821445005273020051878 + .7204696509726630531663123j, -.6020482668090644386627299 + .8602708961893664447167418j, -.4734268069916151511140032 + 1.008234300314801077034158j, -.2897592029880489845789953 + 1.174183010600059128532230j], 19: [-.9078934217899404528985092, -.9021937639390660668922536 + .1219568381872026517578164j, -.8849290585034385274001112 + .2442590757549818229026280j, -.8555768765618421591093993 + .3672925896399872304734923j, -.8131725551578197705476160 + .4915365035562459055630005j, -.7561260971541629355231897 + .6176483917970178919174173j, -.6818424412912442033411634 + .7466272357947761283262338j, -.5858613321217832644813602 + .8801817131014566284786759j, -.4595043449730988600785456 + 1.021768776912671221830298j, -.2804866851439370027628724 + 1.180931628453291873626003j], 20: [-.9062570115576771146523497 + 57961780277849516990208850e-27j, -.8959150941925768608568248 + .1740317175918705058595844j, -.8749560316673332850673214 + .2905559296567908031706902j, -.8427907479956670633544106 + .4078917326291934082132821j, -.7984251191290606875799876 + .5264942388817132427317659j, -.7402780309646768991232610 + .6469975237605228320268752j, -.6658120544829934193890626 + .7703721701100763015154510j, -.5707026806915714094398061 + .8982829066468255593407161j, -.4465700698205149555701841 + 1.034097702560842962315411j, -.2719299580251652601727704 + 1.187099379810885886139638j], 21: [-.9072262653142957028884077, 
-.9025428073192696303995083 + .1105252572789856480992275j, -.8883808106664449854431605 + .2213069215084350419975358j, -.8643915813643204553970169 + .3326258512522187083009453j, -.8299435470674444100273463 + .4448177739407956609694059j, -.7840287980408341576100581 + .5583186348022854707564856j, -.7250839687106612822281339 + .6737426063024382240549898j, -.6506315378609463397807996 + .7920349342629491368548074j, -.5564766488918562465935297 + .9148198405846724121600860j, -.4345168906815271799687308 + 1.045382255856986531461592j, -.2640041595834031147954813 + 1.192762031948052470183960j], 22: [-.9058702269930872551848625 + 52774908289999045189007100e-27j, -.8972983138153530955952835 + .1584351912289865608659759j, -.8799661455640176154025352 + .2644363039201535049656450j, -.8534754036851687233084587 + .3710389319482319823405321j, -.8171682088462720394344996 + .4785619492202780899653575j, -.7700332930556816872932937 + .5874255426351153211965601j, -.7105305456418785989070935 + .6982266265924524000098548j, -.6362427683267827226840153 + .8118875040246347267248508j, -.5430983056306302779658129 + .9299947824439872998916657j, -.4232528745642628461715044 + 1.055755605227545931204656j, -.2566376987939318038016012 + 1.197982433555213008346532j], 23: [-.9066732476324988168207439, -.9027564979912504609412993 + .1010534335314045013252480j, -.8909283242471251458653994 + .2023024699381223418195228j, -.8709469395587416239596874 + .3039581993950041588888925j, -.8423805948021127057054288 + .4062657948237602726779246j, -.8045561642053176205623187 + .5095305912227258268309528j, -.7564660146829880581478138 + .6141594859476032127216463j, -.6965966033912705387505040 + .7207341374753046970247055j, -.6225903228771341778273152 + .8301558302812980678845563j, -.5304922463810191698502226 + .9439760364018300083750242j, -.4126986617510148836149955 + 1.065328794475513585531053j, -.2497697202208956030229911 + 1.202813187870697831365338j], 24: [-.9055312363372773709269407 + 48440066540478700874836350e-27j, -.8983105104397872954053307 + .1454056133873610120105857j, -.8837358034555706623131950 + .2426335234401383076544239j, -.8615278304016353651120610 + .3403202112618624773397257j, -.8312326466813240652679563 + .4386985933597305434577492j, -.7921695462343492518845446 + .5380628490968016700338001j, -.7433392285088529449175873 + .6388084216222567930378296j, -.6832565803536521302816011 + .7415032695091650806797753j, -.6096221567378335562589532 + .8470292433077202380020454j, -.5185914574820317343536707 + .9569048385259054576937721j, -.4027853855197518014786978 + 1.074195196518674765143729j, -.2433481337524869675825448 + 1.207298683731972524975429j], 25: [-.9062073871811708652496104, -.9028833390228020537142561 + 93077131185102967450643820e-27j, -.8928551459883548836774529 + .1863068969804300712287138j, -.8759497989677857803656239 + .2798521321771408719327250j, -.8518616886554019782346493 + .3738977875907595009446142j, -.8201226043936880253962552 + .4686668574656966589020580j, -.7800496278186497225905443 + .5644441210349710332887354j, -.7306549271849967721596735 + .6616149647357748681460822j, -.6704827128029559528610523 + .7607348858167839877987008j, -.5972898661335557242320528 + .8626676330388028512598538j, -.5073362861078468845461362 + .9689006305344868494672405j, -.3934529878191079606023847 + 1.082433927173831581956863j, -.2373280669322028974199184 + 1.211476658382565356579418j], } for N in originals: p1 = sorted(np.union1d(originals[N], np.conj(originals[N])), key=np.imag) p2 = sorted(besselap(N)[1], key=np.imag) 
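# The hardcoded table above stores only one member of each conjugate pole
# pair; taking the union with the conjugates rebuilds the full symmetric
# pole set before it is compared against besselap's output.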
assert_allclose(p1, p2, rtol=1e-14) def test_norm_phase(self): # Test some orders and frequencies and see that they have the right # phase at w0 for N in (1, 2, 3, 4, 5, 51, 72): for w0 in (1, 100): b, a = bessel(N, w0, analog=True, norm='phase') w = np.linspace(0, w0, 100) w, h = freqs(b, a, w) phase = np.unwrap(np.angle(h)) assert_allclose(phase[[0, -1]], (0, -N*pi/4), rtol=1e-1) def test_norm_mag(self): # Test some orders and frequencies and see that they have the right # mag at w0 for N in (1, 2, 3, 4, 5, 51, 72): for w0 in (1, 100): b, a = bessel(N, w0, analog=True, norm='mag') w = (0, w0) w, h = freqs(b, a, w) mag = abs(h) assert_allclose(mag, (1, 1/np.sqrt(2))) def test_norm_delay(self): # Test some orders and frequencies and see that they have the right # delay at DC for N in (1, 2, 3, 4, 5, 51, 72): for w0 in (1, 100): b, a = bessel(N, w0, analog=True, norm='delay') w = np.linspace(0, 10*w0, 1000) w, h = freqs(b, a, w) delay = -np.diff(np.unwrap(np.angle(h)))/np.diff(w) assert_allclose(delay[0], 1/w0, rtol=1e-4) def test_norm_factor(self): mpmath_values = { 1: 1, 2: 1.361654128716130520, 3: 1.755672368681210649, 4: 2.113917674904215843, 5: 2.427410702152628137, 6: 2.703395061202921876, 7: 2.951722147038722771, 8: 3.179617237510651330, 9: 3.391693138911660101, 10: 3.590980594569163482, 11: 3.779607416439620092, 12: 3.959150821144285315, 13: 4.130825499383535980, 14: 4.295593409533637564, 15: 4.454233021624377494, 16: 4.607385465472647917, 17: 4.755586548961147727, 18: 4.899289677284488007, 19: 5.038882681488207605, 20: 5.174700441742707423, 21: 5.307034531360917274, 22: 5.436140703250035999, 23: 5.562244783787878196, 24: 5.685547371295963521, 25: 5.806227623775418541, 50: 8.268963160013226298, 51: 8.352374541546012058, } for N in mpmath_values: z, p, k = besselap(N, 'delay') assert_allclose(mpmath_values[N], _norm_factor(p, k), rtol=1e-13) def test_bessel_poly(self): assert_array_equal(_bessel_poly(5), [945, 945, 420, 105, 15, 1]) assert_array_equal(_bessel_poly(4, True), [1, 10, 45, 105, 105]) def test_bessel_zeros(self): assert_array_equal(_bessel_zeros(0), []) def test_invalid(self): assert_raises(ValueError, besselap, 5, 'nonsense') assert_raises(ValueError, besselap, -5) assert_raises(ValueError, besselap, 3.2) assert_raises(ValueError, _bessel_poly, -3) assert_raises(ValueError, _bessel_poly, 3.3) def test_fs_param(self): for norm in ('phase', 'mag', 'delay'): for fs in (900, 900.1, 1234.567): for N in (0, 1, 2, 3, 10): for fc in (100, 100.1, 432.12345): for btype in ('lp', 'hp'): ba1 = bessel(N, fc, btype, norm=norm, fs=fs) ba2 = bessel(N, fc/(fs/2), btype, norm=norm) assert_allclose(ba1, ba2) for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): for btype in ('bp', 'bs'): ba1 = bessel(N, fc, btype, norm=norm, fs=fs) for seq in (list, tuple, array): fcnorm = seq([f/(fs/2) for f in fc]) ba2 = bessel(N, fcnorm, btype, norm=norm) assert_allclose(ba1, ba2) class TestButter: def test_degenerate(self): # 0-order filter is just a passthrough b, a = butter(0, 1, analog=True) assert_array_equal(b, [1]) assert_array_equal(a, [1]) # 1-order filter is same for all types b, a = butter(1, 1, analog=True) assert_array_almost_equal(b, [1]) assert_array_almost_equal(a, [1, 1]) z, p, k = butter(1, 0.3, output='zpk') assert_array_equal(z, [-1]) assert_allclose(p, [3.249196962329063e-01], rtol=1e-14) assert_allclose(k, 3.375401518835469e-01, rtol=1e-14) def test_basic(self): # analog s-plane for N in range(25): wn = 0.01 z, p, k = butter(N, wn, 'low', analog=True, output='zpk') 
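# The analog Butterworth lowpass prototype is all-pole, so the returned
# zero array should be empty.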
assert_array_almost_equal([], z) assert_(len(p) == N) # All poles should be at distance wn from origin assert_array_almost_equal(wn, abs(p)) assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane assert_array_almost_equal(wn**N, k) # digital z-plane for N in range(25): wn = 0.01 z, p, k = butter(N, wn, 'high', analog=False, output='zpk') assert_array_equal(np.ones(N), z) # All zeros exactly at DC assert_(all(np.abs(p) <= 1)) # No poles outside unit circle b1, a1 = butter(2, 1, analog=True) assert_array_almost_equal(b1, [1]) assert_array_almost_equal(a1, [1, np.sqrt(2), 1]) b2, a2 = butter(5, 1, analog=True) assert_array_almost_equal(b2, [1]) assert_array_almost_equal(a2, [1, 3.2361, 5.2361, 5.2361, 3.2361, 1], decimal=4) b3, a3 = butter(10, 1, analog=True) assert_array_almost_equal(b3, [1]) assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824, 74.2334, 64.8824, 42.8021, 20.4317, 6.3925, 1], decimal=4) b2, a2 = butter(19, 1.0441379169150726, analog=True) assert_array_almost_equal(b2, [2.2720], decimal=4) assert_array_almost_equal(a2, 1.0e+004 * np.array([ 0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570, 0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044, 1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153, 0.0026, 0.0002]), decimal=0) b, a = butter(5, 0.4) assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194, 0.2194, 0.1097, 0.0219], decimal=4) assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738, -0.3864, 0.1112, -0.0113], decimal=4) def test_highpass(self): # highpass, high even order z, p, k = butter(28, 0.43, 'high', output='zpk') z2 = np.ones(28) p2 = [ 2.068257195514592e-01 + 9.238294351481734e-01j, 2.068257195514592e-01 - 9.238294351481734e-01j, 1.874933103892023e-01 + 8.269455076775277e-01j, 1.874933103892023e-01 - 8.269455076775277e-01j, 1.717435567330153e-01 + 7.383078571194629e-01j, 1.717435567330153e-01 - 7.383078571194629e-01j, 1.588266870755982e-01 + 6.564623730651094e-01j, 1.588266870755982e-01 - 6.564623730651094e-01j, 1.481881532502603e-01 + 5.802343458081779e-01j, 1.481881532502603e-01 - 5.802343458081779e-01j, 1.394122576319697e-01 + 5.086609000582009e-01j, 1.394122576319697e-01 - 5.086609000582009e-01j, 1.321840881809715e-01 + 4.409411734716436e-01j, 1.321840881809715e-01 - 4.409411734716436e-01j, 1.262633413354405e-01 + 3.763990035551881e-01j, 1.262633413354405e-01 - 3.763990035551881e-01j, 1.214660449478046e-01 + 3.144545234797277e-01j, 1.214660449478046e-01 - 3.144545234797277e-01j, 1.104868766650320e-01 + 2.771505404367791e-02j, 1.104868766650320e-01 - 2.771505404367791e-02j, 1.111768629525075e-01 + 8.331369153155753e-02j, 1.111768629525075e-01 - 8.331369153155753e-02j, 1.125740630842972e-01 + 1.394219509611784e-01j, 1.125740630842972e-01 - 1.394219509611784e-01j, 1.147138487992747e-01 + 1.963932363793666e-01j, 1.147138487992747e-01 - 1.963932363793666e-01j, 1.176516491045901e-01 + 2.546021573417188e-01j, 1.176516491045901e-01 - 2.546021573417188e-01j, ] k2 = 1.446671081817286e-06 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-7) assert_allclose(k, k2, rtol=1e-10) # highpass, high odd order z, p, k = butter(27, 0.56, 'high', output='zpk') z2 = np.ones(27) p2 = [ -1.772572785680147e-01 + 9.276431102995948e-01j, -1.772572785680147e-01 - 9.276431102995948e-01j, -1.600766565322114e-01 + 8.264026279893268e-01j, -1.600766565322114e-01 - 8.264026279893268e-01j, -1.461948419016121e-01 + 7.341841939120078e-01j, -1.461948419016121e-01 - 7.341841939120078e-01j, -1.348975284762046e-01 + 6.493235066053785e-01j, 
-1.348975284762046e-01 - 6.493235066053785e-01j, -1.256628210712206e-01 + 5.704921366889227e-01j, -1.256628210712206e-01 - 5.704921366889227e-01j, -1.181038235962314e-01 + 4.966120551231630e-01j, -1.181038235962314e-01 - 4.966120551231630e-01j, -1.119304913239356e-01 + 4.267938916403775e-01j, -1.119304913239356e-01 - 4.267938916403775e-01j, -1.069237739782691e-01 + 3.602914879527338e-01j, -1.069237739782691e-01 - 3.602914879527338e-01j, -1.029178030691416e-01 + 2.964677964142126e-01j, -1.029178030691416e-01 - 2.964677964142126e-01j, -9.978747500816100e-02 + 2.347687643085738e-01j, -9.978747500816100e-02 - 2.347687643085738e-01j, -9.743974496324025e-02 + 1.747028739092479e-01j, -9.743974496324025e-02 - 1.747028739092479e-01j, -9.580754551625957e-02 + 1.158246860771989e-01j, -9.580754551625957e-02 - 1.158246860771989e-01j, -9.484562207782568e-02 + 5.772118357151691e-02j, -9.484562207782568e-02 - 5.772118357151691e-02j, -9.452783117928215e-02 ] k2 = 9.585686688851069e-09 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-8) assert_allclose(k, k2) def test_bandpass(self): z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk') z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1] p2 = [ 4.979909925436156e-01 + 8.367609424799387e-01j, 4.979909925436156e-01 - 8.367609424799387e-01j, 4.913338722555539e-01 + 7.866774509868817e-01j, 4.913338722555539e-01 - 7.866774509868817e-01j, 5.035229361778706e-01 + 7.401147376726750e-01j, 5.035229361778706e-01 - 7.401147376726750e-01j, 5.307617160406101e-01 + 7.029184459442954e-01j, 5.307617160406101e-01 - 7.029184459442954e-01j, 5.680556159453138e-01 + 6.788228792952775e-01j, 5.680556159453138e-01 - 6.788228792952775e-01j, 6.100962560818854e-01 + 6.693849403338664e-01j, 6.100962560818854e-01 - 6.693849403338664e-01j, 6.904694312740631e-01 + 6.930501690145245e-01j, 6.904694312740631e-01 - 6.930501690145245e-01j, 6.521767004237027e-01 + 6.744414640183752e-01j, 6.521767004237027e-01 - 6.744414640183752e-01j, ] k2 = 3.398854055800844e-08 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-13) assert_allclose(k, k2, rtol=1e-13) # bandpass analog z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk') z2 = np.zeros(4) p2 = [ -4.179137760733086e+00 + 1.095935899082837e+02j, -4.179137760733086e+00 - 1.095935899082837e+02j, -9.593598668443835e+00 + 1.034745398029734e+02j, -9.593598668443835e+00 - 1.034745398029734e+02j, -8.883991981781929e+00 + 9.582087115567160e+01j, -8.883991981781929e+00 - 9.582087115567160e+01j, -3.474530886568715e+00 + 9.111599925805801e+01j, -3.474530886568715e+00 - 9.111599925805801e+01j, ] k2 = 1.600000000000001e+05 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag)) assert_allclose(k, k2, rtol=1e-15) def test_bandstop(self): z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk') z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j, -1.594474531383421e-02 + 9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j, -1.594474531383421e-02 + 9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j, -1.594474531383421e-02 + 9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j, -1.594474531383421e-02 + 9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j, -1.594474531383421e-02 + 9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j, -1.594474531383421e-02 + 
9.998728744679880e-01j, -1.594474531383421e-02 - 9.998728744679880e-01j] p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j, -1.766850742887729e-01 - 9.466951258673900e-01j, 1.467897662432886e-01 + 9.515917126462422e-01j, 1.467897662432886e-01 - 9.515917126462422e-01j, -1.370083529426906e-01 + 8.880376681273993e-01j, -1.370083529426906e-01 - 8.880376681273993e-01j, 1.086774544701390e-01 + 8.915240810704319e-01j, 1.086774544701390e-01 - 8.915240810704319e-01j, -7.982704457700891e-02 + 8.506056315273435e-01j, -7.982704457700891e-02 - 8.506056315273435e-01j, 5.238812787110331e-02 + 8.524011102699969e-01j, 5.238812787110331e-02 - 8.524011102699969e-01j, -1.357545000491310e-02 + 8.382287744986582e-01j, -1.357545000491310e-02 - 8.382287744986582e-01j] k2 = 4.577122512960063e-01 assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag)) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag)) assert_allclose(k, k2, rtol=1e-14) def test_ba_output(self): b, a = butter(4, [100, 300], 'bandpass', analog=True) b2 = [1.6e+09, 0, 0, 0, 0] a2 = [1.000000000000000e+00, 5.226251859505511e+02, 2.565685424949238e+05, 6.794127417357160e+07, 1.519411254969542e+10, 2.038238225207147e+12, 2.309116882454312e+14, 1.411088002066486e+16, 8.099999999999991e+17] assert_allclose(b, b2, rtol=1e-14) assert_allclose(a, a2, rtol=1e-14) def test_fs_param(self): for fs in (900, 900.1, 1234.567): for N in (0, 1, 2, 3, 10): for fc in (100, 100.1, 432.12345): for btype in ('lp', 'hp'): ba1 = butter(N, fc, btype, fs=fs) ba2 = butter(N, fc/(fs/2), btype) assert_allclose(ba1, ba2) for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): for btype in ('bp', 'bs'): ba1 = butter(N, fc, btype, fs=fs) for seq in (list, tuple, array): fcnorm = seq([f/(fs/2) for f in fc]) ba2 = butter(N, fcnorm, btype) assert_allclose(ba1, ba2) class TestCheby1: def test_degenerate(self): # 0-order filter is just a passthrough # Even-order filters have DC gain of -rp dB b, a = cheby1(0, 10*np.log10(2), 1, analog=True) assert_array_almost_equal(b, [1/np.sqrt(2)]) assert_array_equal(a, [1]) # 1-order filter is same for all types b, a = cheby1(1, 10*np.log10(2), 1, analog=True) assert_array_almost_equal(b, [1]) assert_array_almost_equal(a, [1, 1]) z, p, k = cheby1(1, 0.1, 0.3, output='zpk') assert_array_equal(z, [-1]) assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14) assert_allclose(k, 7.695063486399808e-01, rtol=1e-14) def test_basic(self): for N in range(25): wn = 0.01 z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk') assert_array_almost_equal([], z) assert_(len(p) == N) assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane for N in range(25): wn = 0.01 z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk') assert_array_equal(np.ones(N), z) # All zeros exactly at DC assert_(all(np.abs(p) <= 1)) # No poles outside unit circle # Same test as TestNormalize b, a = cheby1(8, 0.5, 0.048) assert_array_almost_equal(b, [ 2.150733144728282e-11, 1.720586515782626e-10, 6.022052805239190e-10, 1.204410561047838e-09, 1.505513201309798e-09, 1.204410561047838e-09, 6.022052805239190e-10, 1.720586515782626e-10, 2.150733144728282e-11], decimal=14) assert_array_almost_equal(a, [ 1.000000000000000e+00, -7.782402035027959e+00, 2.654354569747454e+01, -5.182182531666387e+01, 6.334127355102684e+01, -4.963358186631157e+01, 2.434862182949389e+01, -6.836925348604676e+00, 8.412934944449140e-01], decimal=14) b, a = cheby1(4, 1, [0.4, 0.7], btype='band') assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0, -0.0335, 0, 
0.0084], decimal=4) assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137, 1.8653, 1.8982, 0.5676, 0.4103], decimal=4) b2, a2 = cheby1(5, 3, 1, analog=True) assert_array_almost_equal(b2, [0.0626], decimal=4) assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080, 0.0626], decimal=4) b, a = cheby1(8, 0.5, 0.1) assert_array_almost_equal(b, 1.0e-006 * np.array([ 0.00703924326028, 0.05631394608227, 0.19709881128793, 0.39419762257586, 0.49274702821983, 0.39419762257586, 0.19709881128793, 0.05631394608227, 0.00703924326028]), decimal=13) assert_array_almost_equal(a, [ 1.00000000000000, -7.44912258934158, 24.46749067762108, -46.27560200466141, 55.11160187999928, -42.31640010161038, 20.45543300484147, -5.69110270561444, 0.69770374759022], decimal=13) b, a = cheby1(8, 0.5, 0.25) assert_array_almost_equal(b, 1.0e-003 * np.array([ 0.00895261138923, 0.07162089111382, 0.25067311889837, 0.50134623779673, 0.62668279724591, 0.50134623779673, 0.25067311889837, 0.07162089111382, 0.00895261138923]), decimal=13) assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545, 16.58122329202101, -27.71423273542923, 30.39509758355313, -22.34729670426879, 10.74509800434910, -3.08924633697497, 0.40707685889802], decimal=13) def test_highpass(self): # high even order z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk') z2 = np.ones(24) p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j, -6.136558509657073e-01 - 2.700091504942893e-01j, -3.303348340927516e-01 + 6.659400861114254e-01j, -3.303348340927516e-01 - 6.659400861114254e-01j, 8.779713780557169e-03 + 8.223108447483040e-01j, 8.779713780557169e-03 - 8.223108447483040e-01j, 2.742361123006911e-01 + 8.356666951611864e-01j, 2.742361123006911e-01 - 8.356666951611864e-01j, 4.562984557158206e-01 + 7.954276912303594e-01j, 4.562984557158206e-01 - 7.954276912303594e-01j, 5.777335494123628e-01 + 7.435821817961783e-01j, 5.777335494123628e-01 - 7.435821817961783e-01j, 6.593260977749194e-01 + 6.955390907990932e-01j, 6.593260977749194e-01 - 6.955390907990932e-01j, 7.149590948466562e-01 + 6.559437858502012e-01j, 7.149590948466562e-01 - 6.559437858502012e-01j, 7.532432388188739e-01 + 6.256158042292060e-01j, 7.532432388188739e-01 - 6.256158042292060e-01j, 7.794365244268271e-01 + 6.042099234813333e-01j, 7.794365244268271e-01 - 6.042099234813333e-01j, 7.967253874772997e-01 + 5.911966597313203e-01j, 7.967253874772997e-01 - 5.911966597313203e-01j, 8.069756417293870e-01 + 5.862214589217275e-01j, 8.069756417293870e-01 - 5.862214589217275e-01j] k2 = 6.190427617192018e-04 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-10) assert_allclose(k, k2, rtol=1e-10) # high odd order z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk') z2 = np.ones(23) p2 = [-7.676400532011010e-01, -6.754621070166477e-01 + 3.970502605619561e-01j, -6.754621070166477e-01 - 3.970502605619561e-01j, -4.528880018446727e-01 + 6.844061483786332e-01j, -4.528880018446727e-01 - 6.844061483786332e-01j, -1.986009130216447e-01 + 8.382285942941594e-01j, -1.986009130216447e-01 - 8.382285942941594e-01j, 2.504673931532608e-02 + 8.958137635794080e-01j, 2.504673931532608e-02 - 8.958137635794080e-01j, 2.001089429976469e-01 + 9.010678290791480e-01j, 2.001089429976469e-01 - 9.010678290791480e-01j, 3.302410157191755e-01 + 8.835444665962544e-01j, 3.302410157191755e-01 - 8.835444665962544e-01j, 4.246662537333661e-01 + 8.594054226449009e-01j, 4.246662537333661e-01 - 8.594054226449009e-01j, 4.919620928120296e-01 + 8.366772762965786e-01j, 4.919620928120296e-01 - 
8.366772762965786e-01j, 5.385746917494749e-01 + 8.191616180796720e-01j, 5.385746917494749e-01 - 8.191616180796720e-01j, 5.855636993537203e-01 + 8.060680937701062e-01j, 5.855636993537203e-01 - 8.060680937701062e-01j, 5.688812849391721e-01 + 8.086497795114683e-01j, 5.688812849391721e-01 - 8.086497795114683e-01j] k2 = 1.941697029206324e-05 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-10) assert_allclose(k, k2, rtol=1e-10) z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk') z2 = np.zeros(10) p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j, -3.144743169501551e+03 - 3.511680029092744e+03j, -5.633065604514602e+02 + 2.023615191183945e+03j, -5.633065604514602e+02 - 2.023615191183945e+03j, -1.946412183352025e+02 + 1.372309454274755e+03j, -1.946412183352025e+02 - 1.372309454274755e+03j, -7.987162953085479e+01 + 1.105207708045358e+03j, -7.987162953085479e+01 - 1.105207708045358e+03j, -2.250315039031946e+01 + 1.001723931471477e+03j, -2.250315039031946e+01 - 1.001723931471477e+03j] k2 = 8.912509381337453e-01 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-13) assert_allclose(k, k2, rtol=1e-15) def test_bandpass(self): z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk') z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1] p2 = [3.077784854851463e-01 + 9.453307017592942e-01j, 3.077784854851463e-01 - 9.453307017592942e-01j, 3.280567400654425e-01 + 9.272377218689016e-01j, 3.280567400654425e-01 - 9.272377218689016e-01j, 3.677912763284301e-01 + 9.038008865279966e-01j, 3.677912763284301e-01 - 9.038008865279966e-01j, 4.194425632520948e-01 + 8.769407159656157e-01j, 4.194425632520948e-01 - 8.769407159656157e-01j, 4.740921994669189e-01 + 8.496508528630974e-01j, 4.740921994669189e-01 - 8.496508528630974e-01j, 5.234866481897429e-01 + 8.259608422808477e-01j, 5.234866481897429e-01 - 8.259608422808477e-01j, 5.844717632289875e-01 + 8.052901363500210e-01j, 5.844717632289875e-01 - 8.052901363500210e-01j, 5.615189063336070e-01 + 8.100667803850766e-01j, 5.615189063336070e-01 - 8.100667803850766e-01j] k2 = 5.007028718074307e-09 assert_array_equal(z, z2) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-13) assert_allclose(k, k2, rtol=1e-13) def test_bandstop(self): z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk') z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j, -1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j, -1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j, -1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j, -1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j, -1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j, -1.583844403245361e-01 + 9.873775210440450e-01j, -1.583844403245361e-01 - 9.873775210440450e-01j] p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j, -8.942974551472813e-02 - 3.482480481185926e-01j, 1.293775154041798e-01 + 8.753499858081858e-01j, 1.293775154041798e-01 - 8.753499858081858e-01j, 3.399741945062013e-02 + 9.690316022705607e-01j, 3.399741945062013e-02 - 9.690316022705607e-01j, 4.167225522796539e-04 + 9.927338161087488e-01j, 4.167225522796539e-04 - 9.927338161087488e-01j, -3.912966549550960e-01 + 8.046122859255742e-01j, -3.912966549550960e-01 - 
8.046122859255742e-01j, -3.307805547127368e-01 + 9.133455018206508e-01j, -3.307805547127368e-01 - 9.133455018206508e-01j, -3.072658345097743e-01 + 9.443589759799366e-01j, -3.072658345097743e-01 - 9.443589759799366e-01j] k2 = 3.619438310405028e-01 assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag), rtol=1e-13) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-13) assert_allclose(k, k2, rtol=0, atol=5e-16) def test_ba_output(self): # with transfer function conversion, without digital conversion b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True) b2 = [1.000000000000006e+00, 0, 3.255000000000020e+05, 0, 4.238010000000026e+10, 0, 2.758944510000017e+15, 0, 8.980364380050052e+19, 0, 1.169243442282517e+24 ] a2 = [1.000000000000000e+00, 4.630555945694342e+02, 4.039266454794788e+05, 1.338060988610237e+08, 5.844333551294591e+10, 1.357346371637638e+13, 3.804661141892782e+15, 5.670715850340080e+17, 1.114411200988328e+20, 8.316815934908471e+21, 1.169243442282517e+24 ] assert_allclose(b, b2, rtol=1e-14) assert_allclose(a, a2, rtol=1e-14) def test_fs_param(self): for fs in (900, 900.1, 1234.567): for N in (0, 1, 2, 3, 10): for fc in (100, 100.1, 432.12345): for btype in ('lp', 'hp'): ba1 = cheby1(N, 1, fc, btype, fs=fs) ba2 = cheby1(N, 1, fc/(fs/2), btype) assert_allclose(ba1, ba2) for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): for btype in ('bp', 'bs'): ba1 = cheby1(N, 1, fc, btype, fs=fs) for seq in (list, tuple, array): fcnorm = seq([f/(fs/2) for f in fc]) ba2 = cheby1(N, 1, fcnorm, btype) assert_allclose(ba1, ba2) class TestCheby2: def test_degenerate(self): # 0-order filter is just a passthrough # Stopband ripple factor doesn't matter b, a = cheby2(0, 123.456, 1, analog=True) assert_array_equal(b, [1]) assert_array_equal(a, [1]) # 1-order filter is same for all types b, a = cheby2(1, 10*np.log10(2), 1, analog=True) assert_array_almost_equal(b, [1]) assert_array_almost_equal(a, [1, 1]) z, p, k = cheby2(1, 50, 0.3, output='zpk') assert_array_equal(z, [-1]) assert_allclose(p, [9.967826460175649e-01], rtol=1e-14) assert_allclose(k, 1.608676991217512e-03, rtol=1e-14) def test_basic(self): for N in range(25): wn = 0.01 z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk') assert_(len(p) == N) assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane for N in range(25): wn = 0.01 z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk') assert_(all(np.abs(p) <= 1)) # No poles outside unit circle B, A = cheby2(18, 100, 0.5) assert_array_almost_equal(B, [ 0.00167583914216, 0.01249479541868, 0.05282702120282, 0.15939804265706, 0.37690207631117, 0.73227013789108, 1.20191856962356, 1.69522872823393, 2.07598674519837, 2.21972389625291, 2.07598674519838, 1.69522872823395, 1.20191856962359, 0.73227013789110, 0.37690207631118, 0.15939804265707, 0.05282702120282, 0.01249479541868, 0.00167583914216], decimal=13) assert_array_almost_equal(A, [ 1.00000000000000, -0.27631970006174, 3.19751214254060, -0.15685969461355, 4.13926117356269, 0.60689917820044, 2.95082770636540, 0.89016501910416, 1.32135245849798, 0.51502467236824, 0.38906643866660, 0.15367372690642, 0.07255803834919, 0.02422454070134, 0.00756108751837, 0.00179848550988, 0.00033713574499, 0.00004258794833, 0.00000281030149], decimal=13) def test_highpass(self): # high even order z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk') z2 = [9.981088955489852e-01 + 6.147058341984388e-02j, 9.981088955489852e-01 - 6.147058341984388e-02j, 9.832702870387426e-01 + 1.821525257215483e-01j, 
9.832702870387426e-01 - 1.821525257215483e-01j, 9.550760158089112e-01 + 2.963609353922882e-01j, 9.550760158089112e-01 - 2.963609353922882e-01j, 9.162054748821922e-01 + 4.007087817803773e-01j, 9.162054748821922e-01 - 4.007087817803773e-01j, 8.700619897368064e-01 + 4.929423232136168e-01j, 8.700619897368064e-01 - 4.929423232136168e-01j, 5.889791753434985e-01 + 8.081482110427953e-01j, 5.889791753434985e-01 - 8.081482110427953e-01j, 5.984900456570295e-01 + 8.011302423760501e-01j, 5.984900456570295e-01 - 8.011302423760501e-01j, 6.172880888914629e-01 + 7.867371958365343e-01j, 6.172880888914629e-01 - 7.867371958365343e-01j, 6.448899971038180e-01 + 7.642754030030161e-01j, 6.448899971038180e-01 - 7.642754030030161e-01j, 6.804845629637927e-01 + 7.327624168637228e-01j, 6.804845629637927e-01 - 7.327624168637228e-01j, 8.202619107108660e-01 + 5.719881098737678e-01j, 8.202619107108660e-01 - 5.719881098737678e-01j, 7.228410452536148e-01 + 6.910143437705678e-01j, 7.228410452536148e-01 - 6.910143437705678e-01j, 7.702121399578629e-01 + 6.377877856007792e-01j, 7.702121399578629e-01 - 6.377877856007792e-01j] p2 = [7.365546198286450e-01 + 4.842085129329526e-02j, 7.365546198286450e-01 - 4.842085129329526e-02j, 7.292038510962885e-01 + 1.442201672097581e-01j, 7.292038510962885e-01 - 1.442201672097581e-01j, 7.151293788040354e-01 + 2.369925800458584e-01j, 7.151293788040354e-01 - 2.369925800458584e-01j, 6.955051820787286e-01 + 3.250341363856910e-01j, 6.955051820787286e-01 - 3.250341363856910e-01j, 6.719122956045220e-01 + 4.070475750638047e-01j, 6.719122956045220e-01 - 4.070475750638047e-01j, 6.461722130611300e-01 + 4.821965916689270e-01j, 6.461722130611300e-01 - 4.821965916689270e-01j, 5.528045062872224e-01 + 8.162920513838372e-01j, 5.528045062872224e-01 - 8.162920513838372e-01j, 5.464847782492791e-01 + 7.869899955967304e-01j, 5.464847782492791e-01 - 7.869899955967304e-01j, 5.488033111260949e-01 + 7.520442354055579e-01j, 5.488033111260949e-01 - 7.520442354055579e-01j, 6.201874719022955e-01 + 5.500894392527353e-01j, 6.201874719022955e-01 - 5.500894392527353e-01j, 5.586478152536709e-01 + 7.112676877332921e-01j, 5.586478152536709e-01 - 7.112676877332921e-01j, 5.958145844148228e-01 + 6.107074340842115e-01j, 5.958145844148228e-01 - 6.107074340842115e-01j, 5.747812938519067e-01 + 6.643001536914696e-01j, 5.747812938519067e-01 - 6.643001536914696e-01j] k2 = 9.932997786497189e-02 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-13) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-12) assert_allclose(k, k2, rtol=1e-11) # high odd order z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk') z2 = [9.690690376586687e-01 + 2.467897896011971e-01j, 9.690690376586687e-01 - 2.467897896011971e-01j, 9.999999999999492e-01, 8.835111277191199e-01 + 4.684101698261429e-01j, 8.835111277191199e-01 - 4.684101698261429e-01j, 7.613142857900539e-01 + 6.483830335935022e-01j, 7.613142857900539e-01 - 6.483830335935022e-01j, 6.232625173626231e-01 + 7.820126817709752e-01j, 6.232625173626231e-01 - 7.820126817709752e-01j, 4.864456563413621e-01 + 8.737108351316745e-01j, 4.864456563413621e-01 - 8.737108351316745e-01j, 3.618368136816749e-01 + 9.322414495530347e-01j, 3.618368136816749e-01 - 9.322414495530347e-01j, 2.549486883466794e-01 + 9.669545833752675e-01j, 2.549486883466794e-01 - 9.669545833752675e-01j, 1.676175432109457e-01 + 9.858520980390212e-01j, 1.676175432109457e-01 - 9.858520980390212e-01j, 1.975218468277521e-03 + 9.999980492540941e-01j, 1.975218468277521e-03 - 9.999980492540941e-01j, 
1.786959496651858e-02 + 9.998403260399917e-01j, 1.786959496651858e-02 - 9.998403260399917e-01j, 9.967933660557139e-02 + 9.950196127985684e-01j, 9.967933660557139e-02 - 9.950196127985684e-01j, 5.013970951219547e-02 + 9.987422137518890e-01j, 5.013970951219547e-02 - 9.987422137518890e-01j] p2 = [4.218866331906864e-01, 4.120110200127552e-01 + 1.361290593621978e-01j, 4.120110200127552e-01 - 1.361290593621978e-01j, 3.835890113632530e-01 + 2.664910809911026e-01j, 3.835890113632530e-01 - 2.664910809911026e-01j, 3.399195570456499e-01 + 3.863983538639875e-01j, 3.399195570456499e-01 - 3.863983538639875e-01j, 2.855977834508353e-01 + 4.929444399540688e-01j, 2.855977834508353e-01 - 4.929444399540688e-01j, 2.255765441339322e-01 + 5.851631870205766e-01j, 2.255765441339322e-01 - 5.851631870205766e-01j, 1.644087535815792e-01 + 6.637356937277153e-01j, 1.644087535815792e-01 - 6.637356937277153e-01j, -7.293633845273095e-02 + 9.739218252516307e-01j, -7.293633845273095e-02 - 9.739218252516307e-01j, 1.058259206358626e-01 + 7.304739464862978e-01j, 1.058259206358626e-01 - 7.304739464862978e-01j, -5.703971947785402e-02 + 9.291057542169088e-01j, -5.703971947785402e-02 - 9.291057542169088e-01j, 5.263875132656864e-02 + 7.877974334424453e-01j, 5.263875132656864e-02 - 7.877974334424453e-01j, -3.007943405982616e-02 + 8.846331716180016e-01j, -3.007943405982616e-02 - 8.846331716180016e-01j, 6.857277464483946e-03 + 8.383275456264492e-01j, 6.857277464483946e-03 - 8.383275456264492e-01j] k2 = 6.507068761705037e-03 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-13) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-12) assert_allclose(k, k2, rtol=1e-11) def test_bandpass(self): z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk') z2 = [-9.999999999999999e-01, 3.676588029658514e-01 + 9.299607543341383e-01j, 3.676588029658514e-01 - 9.299607543341383e-01j, 7.009689684982283e-01 + 7.131917730894889e-01j, 7.009689684982283e-01 - 7.131917730894889e-01j, 7.815697973765858e-01 + 6.238178033919218e-01j, 7.815697973765858e-01 - 6.238178033919218e-01j, 8.063793628819866e-01 + 5.913986160941200e-01j, 8.063793628819866e-01 - 5.913986160941200e-01j, 1.000000000000001e+00, 9.944493019920448e-01 + 1.052168511576739e-01j, 9.944493019920448e-01 - 1.052168511576739e-01j, 9.854674703367308e-01 + 1.698642543566085e-01j, 9.854674703367308e-01 - 1.698642543566085e-01j, 9.762751735919308e-01 + 2.165335665157851e-01j, 9.762751735919308e-01 - 2.165335665157851e-01j, 9.792277171575134e-01 + 2.027636011479496e-01j, 9.792277171575134e-01 - 2.027636011479496e-01j] p2 = [8.143803410489621e-01 + 5.411056063397541e-01j, 8.143803410489621e-01 - 5.411056063397541e-01j, 7.650769827887418e-01 + 5.195412242095543e-01j, 7.650769827887418e-01 - 5.195412242095543e-01j, 6.096241204063443e-01 + 3.568440484659796e-01j, 6.096241204063443e-01 - 3.568440484659796e-01j, 6.918192770246239e-01 + 4.770463577106911e-01j, 6.918192770246239e-01 - 4.770463577106911e-01j, 6.986241085779207e-01 + 1.146512226180060e-01j, 6.986241085779207e-01 - 1.146512226180060e-01j, 8.654645923909734e-01 + 1.604208797063147e-01j, 8.654645923909734e-01 - 1.604208797063147e-01j, 9.164831670444591e-01 + 1.969181049384918e-01j, 9.164831670444591e-01 - 1.969181049384918e-01j, 9.630425777594550e-01 + 2.317513360702271e-01j, 9.630425777594550e-01 - 2.317513360702271e-01j, 9.438104703725529e-01 + 2.193509900269860e-01j, 9.438104703725529e-01 - 2.193509900269860e-01j] k2 = 9.345352824659604e-03 assert_allclose(sorted(z, key=np.angle), sorted(z2, 
key=np.angle), rtol=1e-13) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-13) assert_allclose(k, k2, rtol=1e-11) def test_bandstop(self): z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk') z2 = [6.230544895101009e-01 + 7.821784343111114e-01j, 6.230544895101009e-01 - 7.821784343111114e-01j, 9.086608545660115e-01 + 4.175349702471991e-01j, 9.086608545660115e-01 - 4.175349702471991e-01j, 9.478129721465802e-01 + 3.188268649763867e-01j, 9.478129721465802e-01 - 3.188268649763867e-01j, -6.230544895100982e-01 + 7.821784343111109e-01j, -6.230544895100982e-01 - 7.821784343111109e-01j, -9.086608545660116e-01 + 4.175349702472088e-01j, -9.086608545660116e-01 - 4.175349702472088e-01j, -9.478129721465784e-01 + 3.188268649763897e-01j, -9.478129721465784e-01 - 3.188268649763897e-01j] p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j, -9.464094036167638e-01 - 1.720048695084344e-01j, -8.715844103386737e-01 + 1.370665039509297e-01j, -8.715844103386737e-01 - 1.370665039509297e-01j, -8.078751204586425e-01 + 5.729329866682983e-02j, -8.078751204586425e-01 - 5.729329866682983e-02j, 9.464094036167665e-01 + 1.720048695084332e-01j, 9.464094036167665e-01 - 1.720048695084332e-01j, 8.078751204586447e-01 + 5.729329866683007e-02j, 8.078751204586447e-01 - 5.729329866683007e-02j, 8.715844103386721e-01 + 1.370665039509331e-01j, 8.715844103386721e-01 - 1.370665039509331e-01j] k2 = 2.917823332763358e-03 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-13) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-13) assert_allclose(k, k2, rtol=1e-11) def test_ba_output(self): # with transfer function conversion, without digital conversion b, a = cheby2(5, 20, [2010, 2100], 'stop', True) b2 = [1.000000000000000e+00, 0, # Matlab: 6.683253076978249e-12, 2.111512500000000e+07, 0, # Matlab: 1.134325604589552e-04, 1.782966433781250e+14, 0, # Matlab: 7.216787944356781e+02, 7.525901316990656e+20, 0, # Matlab: 2.039829265789886e+09, 1.587960565565748e+27, 0, # Matlab: 2.161236218626134e+15, 1.339913493808585e+33] a2 = [1.000000000000000e+00, 1.849550755473371e+02, 2.113222918998538e+07, 3.125114149732283e+09, 1.785133457155609e+14, 1.979158697776348e+16, 7.535048322653831e+20, 5.567966191263037e+22, 1.589246884221346e+27, 5.871210648525566e+28, 1.339913493808590e+33] assert_allclose(b, b2, rtol=1e-14) assert_allclose(a, a2, rtol=1e-14) def test_fs_param(self): for fs in (900, 900.1, 1234.567): for N in (0, 1, 2, 3, 10): for fc in (100, 100.1, 432.12345): for btype in ('lp', 'hp'): ba1 = cheby2(N, 20, fc, btype, fs=fs) ba2 = cheby2(N, 20, fc/(fs/2), btype) assert_allclose(ba1, ba2) for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)): for btype in ('bp', 'bs'): ba1 = cheby2(N, 20, fc, btype, fs=fs) for seq in (list, tuple, array): fcnorm = seq([f/(fs/2) for f in fc]) ba2 = cheby2(N, 20, fcnorm, btype) assert_allclose(ba1, ba2) class TestEllip: def test_degenerate(self): # 0-order filter is just a passthrough # Even-order filters have DC gain of -rp dB # Stopband ripple factor doesn't matter b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True) assert_array_almost_equal(b, [1/np.sqrt(2)]) assert_array_equal(a, [1]) # 1-order filter is same for all types b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True) assert_array_almost_equal(b, [1]) assert_array_almost_equal(a, [1, 1]) z, p, k = ellip(1, 1, 55, 0.3, output='zpk') assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14) assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10) assert_allclose(k, 
5.003330360576763e-01, rtol=1e-14) def test_basic(self): for N in range(25): wn = 0.01 z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk') assert_(len(p) == N) assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane for N in range(25): wn = 0.01 z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk') assert_(all(np.abs(p) <= 1)) # No poles outside unit circle b3, a3 = ellip(5, 3, 26, 1, analog=True) assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0, 0.2409], decimal=4) assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012, 0.2409], decimal=4) b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop') assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042, 0.3469, 0.3310], decimal=4) assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323, 0.1131, -0.0060], decimal=4) def test_highpass(self): # high even order z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk') z2 = [9.761875332501075e-01 + 2.169283290099910e-01j, 9.761875332501075e-01 - 2.169283290099910e-01j, 8.413503353963494e-01 + 5.404901600661900e-01j, 8.413503353963494e-01 - 5.404901600661900e-01j, 7.160082576305009e-01 + 6.980918098681732e-01j, 7.160082576305009e-01 - 6.980918098681732e-01j, 6.456533638965329e-01 + 7.636306264739803e-01j, 6.456533638965329e-01 - 7.636306264739803e-01j, 6.127321820971366e-01 + 7.902906256703928e-01j, 6.127321820971366e-01 - 7.902906256703928e-01j, 5.983607817490196e-01 + 8.012267936512676e-01j, 5.983607817490196e-01 - 8.012267936512676e-01j, 5.922577552594799e-01 + 8.057485658286990e-01j, 5.922577552594799e-01 - 8.057485658286990e-01j, 5.896952092563588e-01 + 8.076258788449631e-01j, 5.896952092563588e-01 - 8.076258788449631e-01j, 5.886248765538837e-01 + 8.084063054565607e-01j, 5.886248765538837e-01 - 8.084063054565607e-01j, 5.881802711123132e-01 + 8.087298490066037e-01j, 5.881802711123132e-01 - 8.087298490066037e-01j, 5.879995719101164e-01 + 8.088612386766461e-01j, 5.879995719101164e-01 - 8.088612386766461e-01j, 5.879354086709576e-01 + 8.089078780868164e-01j, 5.879354086709576e-01 - 8.089078780868164e-01j] p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j, -3.184805259081650e-01 - 4.206951906775851e-01j, 1.417279173459985e-01 + 7.903955262836452e-01j, 1.417279173459985e-01 - 7.903955262836452e-01j, 4.042881216964651e-01 + 8.309042239116594e-01j, 4.042881216964651e-01 - 8.309042239116594e-01j, 5.128964442789670e-01 + 8.229563236799665e-01j, 5.128964442789670e-01 - 8.229563236799665e-01j, 5.569614712822724e-01 + 8.155957702908510e-01j, 5.569614712822724e-01 - 8.155957702908510e-01j, 5.750478870161392e-01 + 8.118633973883931e-01j, 5.750478870161392e-01 - 8.118633973883931e-01j, 5.825314018170804e-01 + 8.101960910679270e-01j, 5.825314018170804e-01 - 8.101960910679270e-01j, 5.856397379751872e-01 + 8.094825218722543e-01j, 5.856397379751872e-01 - 8.094825218722543e-01j, 5.869326035251949e-01 + 8.091827531557583e-01j, 5.869326035251949e-01 - 8.091827531557583e-01j, 5.874697218855733e-01 + 8.090593298213502e-01j, 5.874697218855733e-01 - 8.090593298213502e-01j, 5.876904783532237e-01 + 8.090127161018823e-01j, 5.876904783532237e-01 - 8.090127161018823e-01j, 5.877753105317594e-01 + 8.090050577978136e-01j, 5.877753105317594e-01 - 8.090050577978136e-01j] k2 = 4.918081266957108e-02 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-4) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-4) assert_allclose(k, k2, rtol=1e-3) # high odd order z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk') z2 = 
[9.999999999998661e-01, 6.603717261750994e-01 + 7.509388678638675e-01j, 6.603717261750994e-01 - 7.509388678638675e-01j, 2.788635267510325e-01 + 9.603307416968041e-01j, 2.788635267510325e-01 - 9.603307416968041e-01j, 1.070215532544218e-01 + 9.942567008268131e-01j, 1.070215532544218e-01 - 9.942567008268131e-01j, 4.049427369978163e-02 + 9.991797705105507e-01j, 4.049427369978163e-02 - 9.991797705105507e-01j, 1.531059368627931e-02 + 9.998827859909265e-01j, 1.531059368627931e-02 - 9.998827859909265e-01j, 5.808061438534933e-03 + 9.999831330689181e-01j, 5.808061438534933e-03 - 9.999831330689181e-01j, 2.224277847754599e-03 + 9.999975262909676e-01j, 2.224277847754599e-03 - 9.999975262909676e-01j, 8.731857107534554e-04 + 9.999996187732845e-01j, 8.731857107534554e-04 - 9.999996187732845e-01j, 3.649057346914968e-04 + 9.999999334218996e-01j, 3.649057346914968e-04 - 9.999999334218996e-01j, 1.765538109802615e-04 + 9.999999844143768e-01j, 1.765538109802615e-04 - 9.999999844143768e-01j, 1.143655290967426e-04 + 9.999999934602630e-01j, 1.143655290967426e-04 - 9.999999934602630e-01j] p2 = [-6.322017026545028e-01, -4.648423756662754e-01 + 5.852407464440732e-01j, -4.648423756662754e-01 - 5.852407464440732e-01j, -2.249233374627773e-01 + 8.577853017985717e-01j, -2.249233374627773e-01 - 8.577853017985717e-01j, -9.234137570557621e-02 + 9.506548198678851e-01j, -9.234137570557621e-02 - 9.506548198678851e-01j, -3.585663561241373e-02 + 9.821494736043981e-01j, -3.585663561241373e-02 - 9.821494736043981e-01j, -1.363917242312723e-02 + 9.933844128330656e-01j, -1.363917242312723e-02 - 9.933844128330656e-01j, -5.131505238923029e-03 + 9.975221173308673e-01j, -5.131505238923029e-03 - 9.975221173308673e-01j, -1.904937999259502e-03 + 9.990680819857982e-01j, -1.904937999259502e-03 - 9.990680819857982e-01j, -6.859439885466834e-04 + 9.996492201426826e-01j, -6.859439885466834e-04 - 9.996492201426826e-01j, -2.269936267937089e-04 + 9.998686250679161e-01j, -2.269936267937089e-04 - 9.998686250679161e-01j, -5.687071588789117e-05 + 9.999527573294513e-01j, -5.687071588789117e-05 - 9.999527573294513e-01j, -6.948417068525226e-07 + 9.999882737700173e-01j, -6.948417068525226e-07 - 9.999882737700173e-01j] k2 = 1.220910020289434e-02 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-4) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-4) assert_allclose(k, k2, rtol=1e-3) def test_bandpass(self): z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk') z2 = [-9.999999999999991e-01, 6.856610961780020e-01 + 7.279209168501619e-01j, 6.856610961780020e-01 - 7.279209168501619e-01j, 7.850346167691289e-01 + 6.194518952058737e-01j, 7.850346167691289e-01 - 6.194518952058737e-01j, 7.999038743173071e-01 + 6.001281461922627e-01j, 7.999038743173071e-01 - 6.001281461922627e-01j, 9.999999999999999e-01, 9.862938983554124e-01 + 1.649980183725925e-01j, 9.862938983554124e-01 - 1.649980183725925e-01j, 9.788558330548762e-01 + 2.045513580850601e-01j, 9.788558330548762e-01 - 2.045513580850601e-01j, 9.771155231720003e-01 + 2.127093189691258e-01j, 9.771155231720003e-01 - 2.127093189691258e-01j] p2 = [8.063992755498643e-01 + 5.858071374778874e-01j, 8.063992755498643e-01 - 5.858071374778874e-01j, 8.050395347071724e-01 + 5.639097428109795e-01j, 8.050395347071724e-01 - 5.639097428109795e-01j, 8.113124936559144e-01 + 4.855241143973142e-01j, 8.113124936559144e-01 - 4.855241143973142e-01j, 8.665595314082394e-01 + 3.334049560919331e-01j, 8.665595314082394e-01 - 3.334049560919331e-01j, 9.412369011968871e-01 + 
2.457616651325908e-01j, 9.412369011968871e-01 - 2.457616651325908e-01j, 9.679465190411238e-01 + 2.228772501848216e-01j, 9.679465190411238e-01 - 2.228772501848216e-01j, 9.747235066273385e-01 + 2.178937926146544e-01j, 9.747235066273385e-01 - 2.178937926146544e-01j] k2 = 8.354782670263239e-03 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-4) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-4) assert_allclose(k, k2, rtol=1e-3) z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk') z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j, -5.583607317695175e-14 - 1.433755965989225e+02j, 5.740106416459296e-14 + 1.261678754570291e+02j, 5.740106416459296e-14 - 1.261678754570291e+02j, -2.199676239638652e-14 + 6.974861996895196e+01j, -2.199676239638652e-14 - 6.974861996895196e+01j, -3.372595657044283e-14 + 7.926145989044531e+01j, -3.372595657044283e-14 - 7.926145989044531e+01j, 0] p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j, -8.814960004852743e-01 - 1.104124501436066e+02j, -2.477372459140184e+00 + 1.065638954516534e+02j, -2.477372459140184e+00 - 1.065638954516534e+02j, -3.072156842945799e+00 + 9.995404870405324e+01j, -3.072156842945799e+00 - 9.995404870405324e+01j, -2.180456023925693e+00 + 9.379206865455268e+01j, -2.180456023925693e+00 - 9.379206865455268e+01j, -7.230484977485752e-01 + 9.056598800801140e+01j, -7.230484977485752e-01 - 9.056598800801140e+01j] k2 = 3.774571622827070e-02 assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag), rtol=1e-4) assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag), rtol=1e-6) assert_allclose(k, k2, rtol=1e-3) def test_bandstop(self): z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk') z2 = [3.528578094286510e-01 + 9.356769561794296e-01j, 3.528578094286510e-01 - 9.356769561794296e-01j, 3.769716042264783e-01 + 9.262248159096587e-01j, 3.769716042264783e-01 - 9.262248159096587e-01j, 4.406101783111199e-01 + 8.976985411420985e-01j, 4.406101783111199e-01 - 8.976985411420985e-01j, 5.539386470258847e-01 + 8.325574907062760e-01j, 5.539386470258847e-01 - 8.325574907062760e-01j, 6.748464963023645e-01 + 7.379581332490555e-01j, 6.748464963023645e-01 - 7.379581332490555e-01j, 7.489887970285254e-01 + 6.625826604475596e-01j, 7.489887970285254e-01 - 6.625826604475596e-01j, 7.913118471618432e-01 + 6.114127579150699e-01j, 7.913118471618432e-01 - 6.114127579150699e-01j, 7.806804740916381e-01 + 6.249303940216475e-01j, 7.806804740916381e-01 - 6.249303940216475e-01j] p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j, -1.025299146693730e-01 - 5.662682444754943e-01j, 1.698463595163031e-01 + 8.926678667070186e-01j, 1.698463595163031e-01 - 8.926678667070186e-01j, 2.750532687820631e-01 + 9.351020170094005e-01j, 2.750532687820631e-01 - 9.351020170094005e-01j, 3.070095178909486e-01 + 9.457373499553291e-01j, 3.070095178909486e-01 - 9.457373499553291e-01j, 7.695332312152288e-01 + 2.792567212705257e-01j, 7.695332312152288e-01 - 2.792567212705257e-01j, 8.083818999225620e-01 + 4.990723496863960e-01j, 8.083818999225620e-01 - 4.990723496863960e-01j, 8.066158014414928e-01 + 5.649811440393374e-01j, 8.066158014414928e-01 - 5.649811440393374e-01j, 8.062787978834571e-01 + 5.855780880424964e-01j, 8.062787978834571e-01 - 5.855780880424964e-01j] k2 = 2.068622545291259e-01 assert_allclose(sorted(z, key=np.angle), sorted(z2, key=np.angle), rtol=1e-6) assert_allclose(sorted(p, key=np.angle), sorted(p2, key=np.angle), rtol=1e-5) assert_allclose(k, k2, rtol=1e-5) def test_ba_output(self): # with transfer function 
        # conversion, without digital conversion
        b, a = ellip(5, 1, 40, [201, 240], 'stop', True)
        b2 = [1.000000000000000e+00, 0,  # Matlab: 1.743506051190569e-13,
              2.426561778314366e+05, 0,  # Matlab: 3.459426536825722e-08,
              2.348218683400168e+10, 0,  # Matlab: 2.559179747299313e-03,
              1.132780692872241e+15, 0,  # Matlab: 8.363229375535731e+01,
              2.724038554089566e+19, 0,  # Matlab: 1.018700994113120e+06,
              2.612380874940186e+23]
        a2 = [1.000000000000000e+00, 1.337266601804649e+02,
              2.486725353510667e+05, 2.628059713728125e+07,
              2.436169536928770e+10, 1.913554568577315e+12,
              1.175208184614438e+15, 6.115751452473410e+16,
              2.791577695211466e+19, 7.241811142725384e+20,
              2.612380874940182e+23]
        assert_allclose(b, b2, rtol=1e-6)
        assert_allclose(a, a2, rtol=1e-4)

    def test_fs_param(self):
        for fs in (900, 900.1, 1234.567):
            for N in (0, 1, 2, 3, 10):
                for fc in (100, 100.1, 432.12345):
                    for btype in ('lp', 'hp'):
                        ba1 = ellip(N, 1, 20, fc, btype, fs=fs)
                        ba2 = ellip(N, 1, 20, fc/(fs/2), btype)
                        assert_allclose(ba1, ba2)
                for fc in ((100, 200), (100.1, 200.2), (321.123, 432.123)):
                    for btype in ('bp', 'bs'):
                        ba1 = ellip(N, 1, 20, fc, btype, fs=fs)
                        for seq in (list, tuple, array):
                            fcnorm = seq([f/(fs/2) for f in fc])
                            ba2 = ellip(N, 1, 20, fcnorm, btype)
                            assert_allclose(ba1, ba2)


def test_sos_consistency():
    # Consistency checks of output='sos' for the specialized IIR filter
    # design functions.
    design_funcs = [(bessel, (0.1,)),
                    (butter, (0.1,)),
                    (cheby1, (45.0, 0.1)),
                    (cheby2, (0.087, 0.1)),
                    (ellip, (0.087, 45, 0.1))]
    for func, args in design_funcs:
        name = func.__name__
        b, a = func(2, *args, output='ba')
        sos = func(2, *args, output='sos')
        assert_allclose(sos, [np.hstack((b, a))], err_msg="%s(2,...)" % name)

        zpk = func(3, *args, output='zpk')
        sos = func(3, *args, output='sos')
        assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(3,...)" % name)

        zpk = func(4, *args, output='zpk')
        sos = func(4, *args, output='sos')
        assert_allclose(sos, zpk2sos(*zpk), err_msg="%s(4,...)" % name)


class TestIIRNotch:

    def test_ba_output(self):
        # Compare coefficients with Matlab ones
        # for the equivalent input:
        b, a = iirnotch(0.06, 30)
        b2 = [9.9686824e-01, -1.9584219e+00, 9.9686824e-01]
        a2 = [1.0000000e+00, -1.9584219e+00, 9.9373647e-01]
        assert_allclose(b, b2, rtol=1e-8)
        assert_allclose(a, a2, rtol=1e-8)

    def test_frequency_response(self):
        # Get filter coefficients
        b, a = iirnotch(0.3, 30)

        # Get frequency response
        w, h = freqz(b, a, 1000)

        # Pick 5 points
        p = [200,  # w0 = 0.200
             295,  # w0 = 0.295
             300,  # w0 = 0.300
             305,  # w0 = 0.305
             400]  # w0 = 0.400

        # Get the frequency response corresponding to each of those points
        hp = h[p]

        # Check if the frequency response fulfills the specifications:
        # hp[0] and hp[4] correspond to frequencies distant from
        # w0 = 0.3 and should be close to 1
        assert_allclose(abs(hp[0]), 1, rtol=1e-2)
        assert_allclose(abs(hp[4]), 1, rtol=1e-2)

        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to -3dB
        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)

        # hp[2] corresponds to the frequency that should be removed;
        # the frequency response should be very close to 0
        assert_allclose(abs(hp[2]), 0, atol=1e-10)

    def test_errors(self):
        # Exception should be raised if w0 > 1 or w0 < 0
        assert_raises(ValueError, iirnotch, w0=2, Q=30)
        assert_raises(ValueError, iirnotch, w0=-1, Q=30)

        # Exception should be raised if any of the parameters
        # are not float (or cannot be converted to one)
        assert_raises(ValueError, iirnotch, w0="blabla", Q=30)
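        # Here Q is a list, which cannot be cast to float, so a TypeError
        # rather than a ValueError is expected.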
        assert_raises(TypeError, iirnotch, w0=-1, Q=[1, 2, 3])

    def test_fs_param(self):
        # Get filter coefficients
        b, a = iirnotch(1500, 30, fs=10000)

        # Get frequency response
        w, h = freqz(b, a, 1000, fs=10000)

        # Pick 5 points
        p = [200,  # w0 = 1000
             295,  # w0 = 1475
             300,  # w0 = 1500
             305,  # w0 = 1525
             400]  # w0 = 2000

        # Get the frequency response corresponding to each of those points
        hp = h[p]

        # Check if the frequency response fulfills the specifications:
        # hp[0] and hp[4] correspond to frequencies distant from
        # w0 = 1500 and should be close to 1
        assert_allclose(abs(hp[0]), 1, rtol=1e-2)
        assert_allclose(abs(hp[4]), 1, rtol=1e-2)

        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to -3dB
        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)

        # hp[2] corresponds to the frequency that should be removed;
        # the frequency response should be very close to 0
        assert_allclose(abs(hp[2]), 0, atol=1e-10)


class TestIIRPeak:

    def test_ba_output(self):
        # Compare coefficients with Matlab ones
        # for the equivalent input:
        b, a = iirpeak(0.06, 30)
        b2 = [3.131764229e-03, 0, -3.131764229e-03]
        a2 = [1.0000000e+00, -1.958421917e+00, 9.9373647e-01]
        assert_allclose(b, b2, rtol=1e-8)
        assert_allclose(a, a2, rtol=1e-8)

    def test_frequency_response(self):
        # Get filter coefficients
        b, a = iirpeak(0.3, 30)

        # Get frequency response
        w, h = freqz(b, a, 1000)

        # Pick 5 points
        p = [30,  # w0 = 0.030
             295,  # w0 = 0.295
             300,  # w0 = 0.300
             305,  # w0 = 0.305
             800]  # w0 = 0.800

        # Get the frequency response corresponding to each of those points
        hp = h[p]

        # Check if the frequency response fulfills the specifications:
        # hp[0] and hp[4] correspond to frequencies distant from
        # w0 = 0.3 and should be close to 0
        assert_allclose(abs(hp[0]), 0, atol=1e-2)
        assert_allclose(abs(hp[4]), 0, atol=1e-2)

        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to 10**(-3/20)
        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)

        # hp[2] corresponds to the frequency that should be retained and
        # the frequency response should be very close to 1
        assert_allclose(abs(hp[2]), 1, rtol=1e-10)

    def test_errors(self):
        # Exception should be raised if w0 > 1 or w0 < 0
        assert_raises(ValueError, iirpeak, w0=2, Q=30)
        assert_raises(ValueError, iirpeak, w0=-1, Q=30)

        # Exception should be raised if any of the parameters
        # are not float (or cannot be converted to one)
        assert_raises(ValueError, iirpeak, w0="blabla", Q=30)
        assert_raises(TypeError, iirpeak, w0=-1, Q=[1, 2, 3])

    def test_fs_param(self):
        # Get filter coefficients
        b, a = iirpeak(1200, 30, fs=8000)

        # Get frequency response
        w, h = freqz(b, a, 1000, fs=8000)

        # Pick 5 points
        p = [30,  # w0 = 120
             295,  # w0 = 1180
             300,  # w0 = 1200
             305,  # w0 = 1220
             800]  # w0 = 3200

        # Get the frequency response corresponding to each of those points
        hp = h[p]

        # Check if the frequency response fulfills the specifications:
        # hp[0] and hp[4] correspond to frequencies distant from
        # w0 = 1200 and should be close to 0
        assert_allclose(abs(hp[0]), 0, atol=1e-2)
        assert_allclose(abs(hp[4]), 0, atol=1e-2)

        # hp[1] and hp[3] correspond to frequencies approximately
        # on the edges of the passband and should be close to 10**(-3/20)
        assert_allclose(abs(hp[1]), 1/np.sqrt(2), rtol=1e-2)
        assert_allclose(abs(hp[3]), 1/np.sqrt(2), rtol=1e-2)

        # hp[2] corresponds to the frequency that should be retained and
        # the frequency response should be very close to 1
        assert_allclose(abs(hp[2]),
1, rtol=1e-10) class TestIIRComb: # Test erroneous input cases def test_invalid_input(self): # w0 is <= 0 or >= fs / 2 fs = 1000 for args in [(-fs, 30), (0, 35), (fs / 2, 40), (fs, 35)]: with pytest.raises(ValueError, match='w0 must be between '): iircomb(*args, fs=fs) # fs is not divisible by w0 for args in [(120, 30), (157, 35)]: with pytest.raises(ValueError, match='fs must be divisible '): iircomb(*args, fs=fs) # https://github.com/scipy/scipy/issues/14043#issuecomment-1107349140 # Previously, fs=44100, w0=49.999 was rejected, but fs=2, # w0=49.999/int(44100/2) was accepted. Now it is rejected, too. with pytest.raises(ValueError, match='fs must be divisible '): iircomb(w0=49.999/int(44100/2), Q=30) with pytest.raises(ValueError, match='fs must be divisible '): iircomb(w0=49.999, Q=30, fs=44100) # Filter type is not notch or peak for args in [(0.2, 30, 'natch'), (0.5, 35, 'comb')]: with pytest.raises(ValueError, match='ftype must be '): iircomb(*args) # Verify that the filter's frequency response contains a # notch at the cutoff frequency @pytest.mark.parametrize('ftype', ('notch', 'peak')) def test_frequency_response(self, ftype): # Create a notching or peaking comb filter at 1000 Hz b, a = iircomb(1000, 30, ftype=ftype, fs=10000) # Compute the frequency response freqs, response = freqz(b, a, 1000, fs=10000) # Find the notch using argrelextrema comb_points = argrelextrema(abs(response), np.less)[0] # Verify that the first notch sits at 1000 Hz comb1 = comb_points[0] assert_allclose(freqs[comb1], 1000) # Verify pass_zero parameter @pytest.mark.parametrize('ftype,pass_zero,peak,notch', [('peak', True, 123.45, 61.725), ('peak', False, 61.725, 123.45), ('peak', None, 61.725, 123.45), ('notch', None, 61.725, 123.45), ('notch', True, 123.45, 61.725), ('notch', False, 61.725, 123.45)]) def test_pass_zero(self, ftype, pass_zero, peak, notch): # Create a notching or peaking comb filter b, a = iircomb(123.45, 30, ftype=ftype, fs=1234.5, pass_zero=pass_zero) # Compute the frequency response freqs, response = freqz(b, a, [peak, notch], fs=1234.5) # Verify that expected notches are notches and peaks are peaks assert abs(response[0]) > 0.99 assert abs(response[1]) < 1e-10 # All built-in IIR filters are real, so should have perfectly # symmetrical poles and zeros. Then ba representation (using # numpy.poly) will be purely real instead of having negligible # imaginary parts. 
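# The check below round-trips the b, a coefficients through tf2zpk and
# compares each set of roots against its own conjugate, and the gain
# against its real part.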
def test_iir_symmetry(self): b, a = iircomb(400, 30, fs=24000) z, p, k = tf2zpk(b, a) assert_array_equal(sorted(z), sorted(z.conj())) assert_array_equal(sorted(p), sorted(p.conj())) assert_equal(k, np.real(k)) assert issubclass(b.dtype.type, np.floating) assert issubclass(a.dtype.type, np.floating) # Verify filter coefficients with MATLAB's iircomb function def test_ba_output(self): b_notch, a_notch = iircomb(60, 35, ftype='notch', fs=600) b_notch2 = [0.957020174408697, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.957020174408697] a_notch2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.914040348817395] assert_allclose(b_notch, b_notch2) assert_allclose(a_notch, a_notch2) b_peak, a_peak = iircomb(60, 35, ftype='peak', fs=600) b_peak2 = [0.0429798255913026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0429798255913026] a_peak2 = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.914040348817395] assert_allclose(b_peak, b_peak2) assert_allclose(a_peak, a_peak2) # Verify that https://github.com/scipy/scipy/issues/14043 is fixed def test_nearest_divisor(self): # Create a notching comb filter b, a = iircomb(50/int(44100/2), 50.0, ftype='notch') # Compute the frequency response at an upper harmonic of 50 freqs, response = freqz(b, a, [22000], fs=44100) # Before bug fix, this would produce N = 881, so that 22 kHz was ~0 dB. # Now N = 882 correctly and 22 kHz should be a notch <-220 dB assert abs(response[0]) < 1e-10 class TestIIRDesign: def test_exceptions(self): with pytest.raises(ValueError, match="the same shape"): iirdesign(0.2, [0.1, 0.3], 1, 40) with pytest.raises(ValueError, match="the same shape"): iirdesign(np.array([[0.3, 0.6], [0.3, 0.6]]), np.array([[0.4, 0.5], [0.4, 0.5]]), 1, 40) # discrete filter with non-positive frequency with pytest.raises(ValueError, match="must be greater than 0"): iirdesign(0, 0.5, 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign(-0.1, 0.5, 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign(0.1, 0, 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign(0.1, -0.5, 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0, 0.3], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0.3], [0, 0.5], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0.3], [-0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0.3], [0.1, 0], 1, 40) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40) # analog filter with negative frequency with pytest.raises(ValueError, match="must be greater than 0"): iirdesign(-0.1, 0.5, 1, 40, analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign(0.1, -0.5, 1, 40, analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([-0.1, 0.3], [0.1, 0.5], 1, 40, analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, -0.3], [0.1, 0.5], 1, 40, analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0.3], 
[-0.1, 0.5], 1, 40, analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirdesign([0.1, 0.3], [0.1, -0.5], 1, 40, analog=True) # discrete filter with fs=None, freq > 1 with pytest.raises(ValueError, match="must be less than 1"): iirdesign(1, 0.5, 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign(1.1, 0.5, 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign(0.1, 1, 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign(0.1, 1.5, 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([1, 0.3], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([1.1, 0.3], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([0.1, 1], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([0.1, 1.1], [0.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([0.1, 0.3], [1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([0.1, 0.3], [1.1, 0.5], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([0.1, 0.3], [0.1, 1], 1, 40) with pytest.raises(ValueError, match="must be less than 1"): iirdesign([0.1, 0.3], [0.1, 1.5], 1, 40) # discrete filter with fs>2, wp, ws < fs/2 must pass iirdesign(100, 500, 1, 40, fs=2000) iirdesign(500, 100, 1, 40, fs=2000) iirdesign([200, 400], [100, 500], 1, 40, fs=2000) iirdesign([100, 500], [200, 400], 1, 40, fs=2000) # discrete filter with fs>2, freq > fs/2: this must raise with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign(1000, 400, 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign(1100, 500, 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign(100, 1000, 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign(100, 1100, 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([1000, 400], [100, 500], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([1100, 400], [100, 500], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([200, 1000], [100, 500], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([200, 1100], [100, 500], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([200, 400], [1000, 500], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([200, 400], [1100, 500], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([200, 400], [100, 1000], 1, 40, fs=2000) with pytest.raises(ValueError, match="must be less than fs/2"): iirdesign([200, 400], [100, 1100], 1, 40, fs=2000) with pytest.raises(ValueError, match="strictly inside stopband"): iirdesign([0.1, 0.4], [0.5, 0.6], 1, 40) with pytest.raises(ValueError, match="strictly inside stopband"): iirdesign([0.5, 0.6], [0.1, 0.4], 1, 40) with pytest.raises(ValueError, match="strictly inside stopband"): iirdesign([0.3, 0.6], [0.4, 0.7], 1, 40) with pytest.raises(ValueError, match="strictly inside stopband"): iirdesign([0.4, 0.7], [0.3, 0.6], 1, 40) class TestIIRFilter: def test_symmetry(self): # All built-in IIR filters are real, so should have perfectly # symmetrical poles and zeros. 
Then ba representation (using # numpy.poly) will be purely real instead of having negligible # imaginary parts. for N in np.arange(1, 26): for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'): z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True, ftype=ftype, output='zpk') assert_array_equal(sorted(z), sorted(z.conj())) assert_array_equal(sorted(p), sorted(p.conj())) assert_equal(k, np.real(k)) b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True, ftype=ftype, output='ba') assert_(issubclass(b.dtype.type, np.floating)) assert_(issubclass(a.dtype.type, np.floating)) def test_int_inputs(self): # Using integer frequency arguments and large N should not produce # numpy integers that wraparound to negative numbers k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel', output='zpk')[2] k2 = 9.999999999999989e+47 assert_allclose(k, k2) def test_invalid_wn_size(self): # low and high have 1 Wn, band and stop have 2 Wn assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low') assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high') assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp') assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True) def test_invalid_wn_range(self): # For digital filters, 0 <= Wn <= 1 assert_raises(ValueError, iirfilter, 1, 2, btype='low') assert_raises(ValueError, iirfilter, 1, [0.5, 1], btype='band') assert_raises(ValueError, iirfilter, 1, [0., 0.5], btype='band') assert_raises(ValueError, iirfilter, 1, -1, btype='high') assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band') assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop') # analog=True with non-positive critical frequencies with pytest.raises(ValueError, match="must be greater than 0"): iirfilter(2, 0, btype='low', analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirfilter(2, -1, btype='low', analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirfilter(2, [0, 100], analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirfilter(2, [-1, 100], analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirfilter(2, [10, 0], analog=True) with pytest.raises(ValueError, match="must be greater than 0"): iirfilter(2, [10, -1], analog=True) def test_analog_sos(self): # first order Butterworth filter with Wn = 1 has tf 1/(s+1) sos = [[0., 0., 1., 0., 1., 1.]] sos2 = iirfilter(N=1, Wn=1, btype='low', analog=True, output='sos') assert_array_almost_equal(sos, sos2) def test_wn1_ge_wn0(self): # gh-15773: should raise error if Wn[0] >= Wn[1] with pytest.raises(ValueError, match=r"Wn\[0\] must be less than Wn\[1\]"): iirfilter(2, [0.5, 0.5]) with pytest.raises(ValueError, match=r"Wn\[0\] must be less than Wn\[1\]"): iirfilter(2, [0.6, 0.5]) class TestGroupDelay: def test_identity_filter(self): w, gd = group_delay((1, 1)) assert_array_almost_equal(w, pi * np.arange(512) / 512) assert_array_almost_equal(gd, np.zeros(512)) w, gd = group_delay((1, 1), whole=True) assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512) assert_array_almost_equal(gd, np.zeros(512)) def test_fir(self): # Let's design linear phase FIR and check that the group delay # is constant. N = 100 b = firwin(N + 1, 0.1) w, gd = group_delay((b, 1)) assert_allclose(gd, 0.5 * N) def test_iir(self): # Let's design Butterworth filter and test the group delay at # some points against MATLAB answer. 
b, a = butter(4, 0.1) w = np.linspace(0, pi, num=10, endpoint=False) w, gd = group_delay((b, a), w=w) matlab_gd = np.array([8.249313898506037, 11.958947880907104, 2.452325615326005, 1.048918665702008, 0.611382575635897, 0.418293269460578, 0.317932917836572, 0.261371844762525, 0.229038045801298, 0.212185774208521]) assert_array_almost_equal(gd, matlab_gd) def test_singular(self): # Let's create a filter with zeros and poles on the unit circle and # check if warnings are raised at those frequencies. z1 = np.exp(1j * 0.1 * pi) z2 = np.exp(1j * 0.25 * pi) p1 = np.exp(1j * 0.5 * pi) p2 = np.exp(1j * 0.8 * pi) b = np.convolve([1, -z1], [1, -z2]) a = np.convolve([1, -p1], [1, -p2]) w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi]) w, gd = assert_warns(UserWarning, group_delay, (b, a), w=w) def test_backward_compat(self): # For backward compatibility, test if None act as a wrapper for default w1, gd1 = group_delay((1, 1)) w2, gd2 = group_delay((1, 1), None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(gd1, gd2) def test_fs_param(self): # Let's design Butterworth filter and test the group delay at # some points against the normalized frequency answer. b, a = butter(4, 4800, fs=96000) w = np.linspace(0, 96000/2, num=10, endpoint=False) w, gd = group_delay((b, a), w=w, fs=96000) norm_gd = np.array([8.249313898506037, 11.958947880907104, 2.452325615326005, 1.048918665702008, 0.611382575635897, 0.418293269460578, 0.317932917836572, 0.261371844762525, 0.229038045801298, 0.212185774208521]) assert_array_almost_equal(gd, norm_gd) def test_w_or_N_types(self): # Measure at 8 equally-spaced points for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, gd = group_delay((1, 1), N) assert_array_almost_equal(w, pi * np.arange(8) / 8) assert_array_almost_equal(gd, np.zeros(8)) # Measure at frequency 8 rad/sec for w in (8.0, 8.0+0j): w_out, gd = group_delay((1, 1), w) assert_array_almost_equal(w_out, [8]) assert_array_almost_equal(gd, [0]) class TestGammatone: # Test erroneus input cases. def test_invalid_input(self): # Cutoff frequency is <= 0 or >= fs / 2. fs = 16000 for args in [(-fs, 'iir'), (0, 'fir'), (fs / 2, 'iir'), (fs, 'fir')]: with pytest.raises(ValueError, match='The frequency must be ' 'between '): gammatone(*args, fs=fs) # Filter type is not fir or iir for args in [(440, 'fie'), (220, 'it')]: with pytest.raises(ValueError, match='ftype must be '): gammatone(*args, fs=fs) # Order is <= 0 or > 24 for FIR filter. for args in [(440, 'fir', -50), (220, 'fir', 0), (110, 'fir', 25), (55, 'fir', 50)]: with pytest.raises(ValueError, match='Invalid order: '): gammatone(*args, numtaps=None, fs=fs) # Verify that the filter's frequency response is approximately # 1 at the cutoff frequency. def test_frequency_response(self): fs = 16000 ftypes = ['fir', 'iir'] for ftype in ftypes: # Create a gammatone filter centered at 1000 Hz. b, a = gammatone(1000, ftype, fs=fs) # Calculate the frequency response. freqs, response = freqz(b, a) # Determine peak magnitude of the response # and corresponding frequency. response_max = np.max(np.abs(response)) freq_hz = freqs[np.argmax(np.abs(response))] / ((2 * np.pi) / fs) # Check that the peak magnitude is 1 and the frequency is 1000 Hz. response_max == pytest.approx(1, rel=1e-2) freq_hz == pytest.approx(1000, rel=1e-2) # All built-in IIR filters are real, so should have perfectly # symmetrical poles and zeros. Then ba representation (using # numpy.poly) will be purely real instead of having negligible # imaginary parts. 
def test_iir_symmetry(self): b, a = gammatone(440, 'iir', fs=24000) z, p, k = tf2zpk(b, a) assert_array_equal(sorted(z), sorted(z.conj())) assert_array_equal(sorted(p), sorted(p.conj())) assert_equal(k, np.real(k)) assert_(issubclass(b.dtype.type, np.floating)) assert_(issubclass(a.dtype.type, np.floating)) # Verify FIR filter coefficients with the paper's # Mathematica implementation def test_fir_ba_output(self): b, _ = gammatone(15, 'fir', fs=1000) b2 = [0.0, 2.2608075649884e-04, 1.5077903981357e-03, 4.2033687753998e-03, 8.1508962726503e-03, 1.2890059089154e-02, 1.7833890391666e-02, 2.2392613558564e-02, 2.6055195863104e-02, 2.8435872863284e-02, 2.9293319149544e-02, 2.852976858014e-02, 2.6176557156294e-02, 2.2371510270395e-02, 1.7332485267759e-02] assert_allclose(b, b2) # Verify IIR filter coefficients with the paper's MATLAB implementation def test_iir_ba_output(self): b, a = gammatone(440, 'iir', fs=16000) b2 = [1.31494461367464e-06, -5.03391196645395e-06, 7.00649426000897e-06, -4.18951968419854e-06, 9.02614910412011e-07] a2 = [1.0, -7.65646235454218, 25.7584699322366, -49.7319214483238, 60.2667361289181, -46.9399590980486, 22.9474798808461, -6.43799381299034, 0.793651554625368] assert_allclose(b, b2) assert_allclose(a, a2) class TestOrderFilter: def test_doc_example(self): x = np.arange(25).reshape(5, 5) domain = np.identity(3) # minimum of elements 1,3,9 (zero-padded) on phone pad # 7,5,3 on numpad expected = np.array( [[0., 0., 0., 0., 0.], [0., 0., 1., 2., 0.], [0., 5., 6., 7., 0.], [0., 10., 11., 12., 0.], [0., 0., 0., 0., 0.]], ) assert_allclose(order_filter(x, domain, 0), expected) # maximum of elements 1,3,9 (zero-padded) on phone pad # 7,5,3 on numpad expected = np.array( [[6., 7., 8., 9., 4.], [11., 12., 13., 14., 9.], [16., 17., 18., 19., 14.], [21., 22., 23., 24., 19.], [20., 21., 22., 23., 24.]], ) assert_allclose(order_filter(x, domain, 2), expected) # and, just to complete the set, median of zero-padded elements expected = np.array( [[0, 1, 2, 3, 0], [5, 6, 7, 8, 3], [10, 11, 12, 13, 8], [15, 16, 17, 18, 13], [0, 15, 16, 17, 18]], ) assert_allclose(order_filter(x, domain, 1), expected) def test_medfilt_order_filter(self): x = np.arange(25).reshape(5, 5) # median of zero-padded elements 1,5,9 on phone pad # 7,5,3 on numpad expected = np.array( [[0, 1, 2, 3, 0], [1, 6, 7, 8, 4], [6, 11, 12, 13, 9], [11, 16, 17, 18, 14], [0, 16, 17, 18, 0]], ) assert_allclose(medfilt(x, 3), expected) assert_allclose( order_filter(x, np.ones((3, 3)), 4), expected ) def test_order_filter_asymmetric(self): x = np.arange(25).reshape(5, 5) domain = np.array( [[1, 1, 0], [0, 1, 0], [0, 0, 0]], ) expected = np.array( [[0, 0, 0, 0, 0], [0, 0, 1, 2, 3], [0, 5, 6, 7, 8], [0, 10, 11, 12, 13], [0, 15, 16, 17, 18]] ) assert_allclose(order_filter(x, domain, 0), expected) expected = np.array( [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]] ) assert_allclose(order_filter(x, domain, 1), expected)
189,415
43.307836
254
py
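The TestIIRNotch/TestIIRPeak cases above check that the designed filter's magnitude is roughly 0 (or 1) at w0 and roughly 1/sqrt(2) at the -3 dB band edges implied by Q. A minimal standalone sketch of that same check, using only the public scipy.signal API; the fs, w0 and Q values below are illustrative choices, not taken from the test suite.

import numpy as np
from scipy.signal import freqz, iirnotch

fs = 10000.0           # sampling frequency in Hz (example value)
w0, Q = 1500.0, 30.0   # notch frequency and quality factor (example values)
b, a = iirnotch(w0, Q, fs=fs)

# Q is defined as w0 / bw, so the -3 dB edges should sit near w0 +/- bw/2.
bw = w0 / Q
_, h = freqz(b, a, worN=[w0 - bw / 2, w0, w0 + bw / 2], fs=fs)
print(np.abs(h))       # expected: roughly [0.707, 0.0, 0.707]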
scipy
scipy-main/scipy/signal/tests/test_waveforms.py
import numpy as np from numpy.testing import (assert_almost_equal, assert_equal, assert_, assert_allclose, assert_array_equal) from pytest import raises as assert_raises import scipy.signal._waveforms as waveforms # These chirp_* functions are the instantaneous frequencies of the signals # returned by chirp(). def chirp_linear(t, f0, f1, t1): f = f0 + (f1 - f0) * t / t1 return f def chirp_quadratic(t, f0, f1, t1, vertex_zero=True): if vertex_zero: f = f0 + (f1 - f0) * t**2 / t1**2 else: f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2 return f def chirp_geometric(t, f0, f1, t1): f = f0 * (f1/f0)**(t/t1) return f def chirp_hyperbolic(t, f0, f1, t1): f = f0*f1*t1 / ((f0 - f1)*t + f1*t1) return f def compute_frequency(t, theta): """ Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t). """ # Assume theta and t are 1-D NumPy arrays. # Assume that t is uniformly spaced. dt = t[1] - t[0] f = np.diff(theta)/(2*np.pi) / dt tf = 0.5*(t[1:] + t[:-1]) return tf, f class TestChirp: def test_linear_at_zero(self): w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear') assert_almost_equal(w, 1.0) def test_linear_freq_01(self): method = 'linear' f0 = 1.0 f1 = 2.0 t1 = 1.0 t = np.linspace(0, t1, 100) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) assert_(abserr < 1e-6) def test_linear_freq_02(self): method = 'linear' f0 = 200.0 f1 = 100.0 t1 = 10.0 t = np.linspace(0, t1, 100) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1))) assert_(abserr < 1e-6) def test_quadratic_at_zero(self): w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic') assert_almost_equal(w, 1.0) def test_quadratic_at_zero2(self): w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic', vertex_zero=False) assert_almost_equal(w, 1.0) def test_quadratic_freq_01(self): method = 'quadratic' f0 = 1.0 f1 = 2.0 t1 = 1.0 t = np.linspace(0, t1, 2000) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) assert_(abserr < 1e-6) def test_quadratic_freq_02(self): method = 'quadratic' f0 = 20.0 f1 = 10.0 t1 = 10.0 t = np.linspace(0, t1, 2000) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1))) assert_(abserr < 1e-6) def test_logarithmic_at_zero(self): w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic') assert_almost_equal(w, 1.0) def test_logarithmic_freq_01(self): method = 'logarithmic' f0 = 1.0 f1 = 2.0 t1 = 1.0 t = np.linspace(0, t1, 10000) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) assert_(abserr < 1e-6) def test_logarithmic_freq_02(self): method = 'logarithmic' f0 = 200.0 f1 = 100.0 t1 = 10.0 t = np.linspace(0, t1, 10000) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) assert_(abserr < 1e-6) def test_logarithmic_freq_03(self): method = 'logarithmic' f0 = 100.0 f1 = 100.0 t1 = 10.0 t = np.linspace(0, t1, 10000) phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1))) assert_(abserr < 1e-6) def 
test_hyperbolic_at_zero(self): w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic') assert_almost_equal(w, 1.0) def test_hyperbolic_freq_01(self): method = 'hyperbolic' t1 = 1.0 t = np.linspace(0, t1, 10000) # f0 f1 cases = [[10.0, 1.0], [1.0, 10.0], [-10.0, -1.0], [-1.0, -10.0]] for f0, f1 in cases: phase = waveforms._chirp_phase(t, f0, t1, f1, method) tf, f = compute_frequency(t, phase) expected = chirp_hyperbolic(tf, f0, f1, t1) assert_allclose(f, expected) def test_hyperbolic_zero_freq(self): # f0=0 or f1=0 must raise a ValueError. method = 'hyperbolic' t1 = 1.0 t = np.linspace(0, t1, 5) assert_raises(ValueError, waveforms.chirp, t, 0, t1, 1, method) assert_raises(ValueError, waveforms.chirp, t, 1, t1, 0, method) def test_unknown_method(self): method = "foo" f0 = 10.0 f1 = 20.0 t1 = 1.0 t = np.linspace(0, t1, 10) assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method) def test_integer_t1(self): f0 = 10.0 f1 = 20.0 t = np.linspace(-1, 1, 11) t1 = 3.0 float_result = waveforms.chirp(t, f0, t1, f1) t1 = 3 int_result = waveforms.chirp(t, f0, t1, f1) err_msg = "Integer input 't1=3' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) def test_integer_f0(self): f1 = 20.0 t1 = 3.0 t = np.linspace(-1, 1, 11) f0 = 10.0 float_result = waveforms.chirp(t, f0, t1, f1) f0 = 10 int_result = waveforms.chirp(t, f0, t1, f1) err_msg = "Integer input 'f0=10' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) def test_integer_f1(self): f0 = 10.0 t1 = 3.0 t = np.linspace(-1, 1, 11) f1 = 20.0 float_result = waveforms.chirp(t, f0, t1, f1) f1 = 20 int_result = waveforms.chirp(t, f0, t1, f1) err_msg = "Integer input 'f1=20' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) def test_integer_all(self): f0 = 10 t1 = 3 f1 = 20 t = np.linspace(-1, 1, 11) float_result = waveforms.chirp(t, float(f0), float(t1), float(f1)) int_result = waveforms.chirp(t, f0, t1, f1) err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) class TestSweepPoly: def test_sweep_poly_quad1(self): p = np.poly1d([1.0, 0.0, 1.0]) t = np.linspace(0, 3.0, 10000) phase = waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = p(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) def test_sweep_poly_const(self): p = np.poly1d(2.0) t = np.linspace(0, 3.0, 10000) phase = waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = p(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) def test_sweep_poly_linear(self): p = np.poly1d([-1.0, 10.0]) t = np.linspace(0, 3.0, 10000) phase = waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = p(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) def test_sweep_poly_quad2(self): p = np.poly1d([1.0, 0.0, -2.0]) t = np.linspace(0, 3.0, 10000) phase = waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = p(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) def test_sweep_poly_cubic(self): p = np.poly1d([2.0, 1.0, 0.0, -2.0]) t = np.linspace(0, 2.0, 10000) phase = waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = p(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) def test_sweep_poly_cubic2(self): """Use an array of coefficients instead of a poly1d.""" p = np.array([2.0, 1.0, 0.0, -2.0]) t = np.linspace(0, 2.0, 10000) phase = 
waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = np.poly1d(p)(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) def test_sweep_poly_cubic3(self): """Use a list of coefficients instead of a poly1d.""" p = [2.0, 1.0, 0.0, -2.0] t = np.linspace(0, 2.0, 10000) phase = waveforms._sweep_poly_phase(t, p) tf, f = compute_frequency(t, phase) expected = np.poly1d(p)(tf) abserr = np.max(np.abs(f - expected)) assert_(abserr < 1e-6) class TestGaussPulse: def test_integer_fc(self): float_result = waveforms.gausspulse('cutoff', fc=1000.0) int_result = waveforms.gausspulse('cutoff', fc=1000) err_msg = "Integer input 'fc=1000' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) def test_integer_bw(self): float_result = waveforms.gausspulse('cutoff', bw=1.0) int_result = waveforms.gausspulse('cutoff', bw=1) err_msg = "Integer input 'bw=1' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) def test_integer_bwr(self): float_result = waveforms.gausspulse('cutoff', bwr=-6.0) int_result = waveforms.gausspulse('cutoff', bwr=-6) err_msg = "Integer input 'bwr=-6' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) def test_integer_tpr(self): float_result = waveforms.gausspulse('cutoff', tpr=-60.0) int_result = waveforms.gausspulse('cutoff', tpr=-60) err_msg = "Integer input 'tpr=-60' gives wrong result" assert_equal(int_result, float_result, err_msg=err_msg) class TestUnitImpulse: def test_no_index(self): assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0]) assert_array_equal(waveforms.unit_impulse((3, 3)), [[1, 0, 0], [0, 0, 0], [0, 0, 0]]) def test_index(self): assert_array_equal(waveforms.unit_impulse(10, 3), [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]) assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)), [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) # Broadcasting imp = waveforms.unit_impulse((4, 4), 2) assert_array_equal(imp, np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]])) def test_mid(self): assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'), [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_array_equal(waveforms.unit_impulse(9, 'mid'), [0, 0, 0, 0, 1, 0, 0, 0, 0]) def test_dtype(self): imp = waveforms.unit_impulse(7) assert_(np.issubdtype(imp.dtype, np.floating)) imp = waveforms.unit_impulse(5, 3, dtype=int) assert_(np.issubdtype(imp.dtype, np.integer)) imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex) assert_(np.issubdtype(imp.dtype, np.complexfloating))
11,975
33.022727
78
py
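The chirp_* helpers and compute_frequency above rely on the fact that the instantaneous frequency is the phase derivative divided by 2*pi. A small standalone sketch of the same idea using only public scipy.signal functions (chirp and hilbert); the f0, f1, t1 and sample-count values are illustrative assumptions, and the Hilbert-based phase estimate is only a rough stand-in for the private _chirp_phase used by the tests.

import numpy as np
from scipy.signal import chirp, hilbert

f0, f1, t1 = 1.0, 10.0, 1.0           # example linear sweep from 1 Hz to 10 Hz
t = np.linspace(0, t1, 100_000)
x = chirp(t, f0, t1, f1, method='linear')

# Instantaneous frequency = d(phase)/dt / (2*pi); estimate the phase from the
# analytic signal and compare against the expected law f(t) = f0 + (f1 - f0)*t/t1.
phase = np.unwrap(np.angle(hilbert(x)))
f_est = np.diff(phase) / (2 * np.pi) / (t[1] - t[0])
t_mid = 0.5 * (t[1:] + t[:-1])
f_expected = f0 + (f1 - f0) * t_mid / t1

# The Hilbert estimate is unreliable near the ends, so compare the interior only.
interior = slice(len(t) // 10, -len(t) // 10)
print(np.max(np.abs(f_est[interior] - f_expected[interior])))  # should be small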
scipy
scipy-main/scipy/signal/windows/setup.py
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('windows', parent_package, top_path)
    config.add_data_dir('tests')
    return config
227
24.333333
63
py
scipy
scipy-main/scipy/signal/windows/_windows.py
"""The suite of window functions.""" import operator import warnings import numpy as np from scipy import linalg, special, fft as sp_fft __all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', 'blackmanharris', 'flattop', 'bartlett', 'barthann', 'hamming', 'kaiser', 'kaiser_bessel_derived', 'gaussian', 'general_cosine', 'general_gaussian', 'general_hamming', 'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor', 'dpss', 'get_window', 'lanczos'] def _len_guards(M): """Handle small or incorrect window lengths""" if int(M) != M or M < 0: raise ValueError('Window length M must be a non-negative integer') return M <= 1 def _extend(M, sym): """Extend window by 1 sample if needed for DFT-even symmetry""" if not sym: return M + 1, True else: return M, False def _truncate(w, needed): """Truncate window by 1 sample if needed for DFT-even symmetry""" if needed: return w[:-1] else: return w def general_cosine(M, a, sym=True): r""" Generic weighted sum of cosine terms window Parameters ---------- M : int Number of points in the output window a : array_like Sequence of weighting coefficients. This uses the convention of being centered on the origin, so these will typically all be positive numbers, not alternating sign. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The array of window values. References ---------- .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. .. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new flat-top windows", February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf Examples -------- Heinzel describes a flat-top window named "HFT90D" with formula: [2]_ .. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z) - 0.440811 \cos(3z) + 0.043097 \cos(4z) where .. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1 Since this uses the convention of starting at the origin, to reproduce the window, we need to convert every other coefficient to a positive number: >>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097] The paper states that the highest sidelobe is at -90.2 dB. 
Reproduce Figure 42 by plotting the window and its frequency response, and confirm the sidelobe level in red: >>> import numpy as np >>> from scipy.signal.windows import general_cosine >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = general_cosine(1000, HFT90D, sym=False) >>> plt.plot(window) >>> plt.title("HFT90D window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 10000) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = np.abs(fftshift(A / abs(A).max())) >>> response = 20 * np.log10(np.maximum(response, 1e-10)) >>> plt.plot(freq, response) >>> plt.axis([-50/1000, 50/1000, -140, 0]) >>> plt.title("Frequency response of the HFT90D window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") >>> plt.axhline(-90.2, color='red') >>> plt.show() """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) fac = np.linspace(-np.pi, np.pi, M) w = np.zeros(M) for k in range(len(a)): w += a[k] * np.cos(k * fac) return _truncate(w, needs_trunc) def boxcar(M, sym=True): """Return a boxcar or rectangular window. Also known as a rectangular window or Dirichlet window, this is equivalent to no window at all. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional Whether the window is symmetric. (Has no effect for boxcar.) Returns ------- w : ndarray The window, with the maximum value normalized to 1. Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.boxcar(51) >>> plt.plot(window) >>> plt.title("Boxcar window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the boxcar window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) w = np.ones(M, float) return _truncate(w, needs_trunc) def triang(M, sym=True): """Return a triangular window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). 
See Also -------- bartlett : A triangular window that touches zero Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.triang(51) >>> plt.plot(window) >>> plt.title("Triangular window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = np.abs(fftshift(A / abs(A).max())) >>> response = 20 * np.log10(np.maximum(response, 1e-10)) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the triangular window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(1, (M + 1) // 2 + 1) if M % 2 == 0: w = (2 * n - 1.0) / M w = np.r_[w, w[::-1]] else: w = 2 * n / (M + 1.0) w = np.r_[w, w[-2::-1]] return _truncate(w, needs_trunc) def parzen(M, sym=True): """Return a Parzen window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). References ---------- .. [1] E. Parzen, "Mathematical Considerations in the Estimation of Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 167-190 Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.parzen(51) >>> plt.plot(window) >>> plt.title("Parzen window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Parzen window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0) na = np.extract(n < -(M - 1) / 4.0, n) nb = np.extract(abs(n) <= (M - 1) / 4.0, n) wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0 wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 + 6 * (np.abs(nb) / (M / 2.0)) ** 3.0) w = np.r_[wa, wb, wa[::-1]] return _truncate(w, needs_trunc) def bohman(M, sym=True): """Return a Bohman window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). 
Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.bohman(51) >>> plt.plot(window) >>> plt.title("Bohman window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2047) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Bohman window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) fac = np.abs(np.linspace(-1, 1, M)[1:-1]) w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac) w = np.r_[0, w, 0] return _truncate(w, needs_trunc) def blackman(M, sym=True): r""" Return a Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M) The "exact Blackman" window was designed to null out the third and fourth sidelobes, but has discontinuities at the boundaries, resulting in a 6 dB/oct fall-off. This window is an approximation of the "exact" window, which does not null the sidelobes as well, but is smooth at the edges, improving the fall-off rate to 18 dB/oct. [3]_ Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the Kaiser window. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. .. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic Analysis with the Discrete Fourier Transform". Proceedings of the IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`. 
Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.blackman(51) >>> plt.plot(window) >>> plt.title("Blackman window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = np.abs(fftshift(A / abs(A).max())) >>> response = 20 * np.log10(np.maximum(response, 1e-10)) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Blackman window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ # Docstring adapted from NumPy's blackman function return general_cosine(M, [0.42, 0.50, 0.08], sym) def nuttall(M, sym=True): """Return a minimum 4-term Blackman-Harris window according to Nuttall. This variation is called "Nuttall4c" by Heinzel. [2]_ Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). References ---------- .. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`. .. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new flat-top windows", February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.nuttall(51) >>> plt.plot(window) >>> plt.title("Nuttall window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Nuttall window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym) def blackmanharris(M, sym=True): """Return a minimum 4-term Blackman-Harris window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). 
Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.blackmanharris(51) >>> plt.plot(window) >>> plt.title("Blackman-Harris window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Blackman-Harris window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym) def flattop(M, sym=True): """Return a flat top window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- Flat top windows are used for taking accurate measurements of signal amplitude in the frequency domain, with minimal scalloping error from the center of a frequency bin to its edges, compared to others. This is a 5th-order cosine window, with the 5 terms optimized to make the main lobe maximally flat. [1]_ References ---------- .. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for Measurement Systems", Springer Media, 2006, p. 70 :doi:`10.1007/0-387-28666-7`. Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.flattop(51) >>> plt.plot(window) >>> plt.title("Flat top window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the flat top window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368] return general_cosine(M, a, sym) def bartlett(M, sym=True): r""" Return a Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The triangular window, with the first and last samples equal to zero and the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). 
See Also -------- triang : A triangular window that does not touch zero at the ends Notes ----- The Bartlett window is defined as .. math:: w(n) = \frac{2}{M-1} \left( \frac{M-1}{2} - \left|n - \frac{M-1}{2}\right| \right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The Fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. [2]_ References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.bartlett(51) >>> plt.plot(window) >>> plt.title("Bartlett window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Bartlett window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ # Docstring adapted from NumPy's bartlett function if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(0, M) w = np.where(np.less_equal(n, (M - 1) / 2.0), 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1)) return _truncate(w, needs_trunc) def hann(M, sym=True): r""" Return a Hann window. The Hann window is a taper formed by using a raised cosine or sine-squared with ends that touch zero. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Hann window is defined as .. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1 The window was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. It is sometimes erroneously referred to as the "Hanning" window, from the use of "hann" as a verb in the original paper and confusion with the very similar Hamming window. Most references to the Hann window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. 
It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.hann(51) >>> plt.plot(window) >>> plt.title("Hann window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = np.abs(fftshift(A / abs(A).max())) >>> response = 20 * np.log10(np.maximum(response, 1e-10)) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Hann window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ # Docstring adapted from NumPy's hanning function return general_hamming(M, 0.5, sym) def tukey(M, alpha=0.5, sym=True): r"""Return a Tukey window, also known as a tapered cosine window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. alpha : float, optional Shape parameter of the Tukey window, representing the fraction of the window inside the cosine tapered region. If zero, the Tukey window is equivalent to a rectangular window. If one, the Tukey window is equivalent to a Hann window. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). References ---------- .. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic Analysis with the Discrete Fourier Transform". Proceedings of the IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837` .. 
[2] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function#Tukey_window Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.tukey(51) >>> plt.plot(window) >>> plt.title("Tukey window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.ylim([0, 1.1]) >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Tukey window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) if alpha <= 0: return np.ones(M, 'd') elif alpha >= 1.0: return hann(M, sym=sym) M, needs_trunc = _extend(M, sym) n = np.arange(0, M) width = int(np.floor(alpha*(M-1)/2.0)) n1 = n[0:width+1] n2 = n[width+1:M-width-1] n3 = n[M-width-1:] w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1)))) w2 = np.ones(n2.shape) w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1)))) w = np.concatenate((w1, w2, w3)) return _truncate(w, needs_trunc) def barthann(M, sym=True): """Return a modified Bartlett-Hann window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.barthann(51) >>> plt.plot(window) >>> plt.title("Bartlett-Hann window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Bartlett-Hann window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(0, M) fac = np.abs(n / (M - 1.0) - 0.5) w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac) return _truncate(w, needs_trunc) def general_hamming(M, alpha, sym=True): r"""Return a generalized Hamming window. The generalized Hamming window is constructed by multiplying a rectangular window by one period of a cosine function [1]_. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. alpha : float The window coefficient, :math:`\alpha` sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. 
Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). See Also -------- hamming, hann Notes ----- The generalized Hamming window is defined as .. math:: w(n) = \alpha - \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1 Both the common Hamming window and Hann window are special cases of the generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` = 0.5, respectively [2]_. References ---------- .. [1] DSPRelated, "Generalized Hamming Window Family", https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html .. [2] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function .. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm Definition", https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition .. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition", https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition Examples -------- The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming windows in the processing of spaceborne Synthetic Aperture Radar (SAR) data [3]_. The facility uses various values for the :math:`\alpha` parameter based on operating mode of the SAR instrument. Some common :math:`\alpha` values include 0.75, 0.7 and 0.52 [4]_. As an example, we plot these different windows. >>> import numpy as np >>> from scipy.signal.windows import general_hamming >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> fig1, spatial_plot = plt.subplots() >>> spatial_plot.set_title("Generalized Hamming Windows") >>> spatial_plot.set_ylabel("Amplitude") >>> spatial_plot.set_xlabel("Sample") >>> fig2, freq_plot = plt.subplots() >>> freq_plot.set_title("Frequency Responses") >>> freq_plot.set_ylabel("Normalized magnitude [dB]") >>> freq_plot.set_xlabel("Normalized frequency [cycles per sample]") >>> for alpha in [0.75, 0.7, 0.52]: ... window = general_hamming(41, alpha) ... spatial_plot.plot(window, label="{:.2f}".format(alpha)) ... A = fft(window, 2048) / (len(window)/2.0) ... freq = np.linspace(-0.5, 0.5, len(A)) ... response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) ... freq_plot.plot(freq, response, label="{:.2f}".format(alpha)) >>> freq_plot.legend(loc="upper right") >>> spatial_plot.legend(loc="upper right") """ return general_cosine(M, [alpha, 1. - alpha], sym) def hamming(M, sym=True): r"""Return a Hamming window. The Hamming window is a taper formed by using a raised cosine with non-zero endpoints, optimized to minimize the nearest side lobe. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. 
Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.hamming(51) >>> plt.plot(window) >>> plt.title("Hamming window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Hamming window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ # Docstring adapted from NumPy's hamming function return general_hamming(M, 0.54, sym) def kaiser(M, beta, sym=True): r"""Return a Kaiser window. The Kaiser window is a taper formed by using a Bessel function. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. beta : float Shape parameter, determines trade-off between main-lobe width and side lobe level. As beta gets large, the window narrows. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Kaiser window is defined as .. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}} \right)/I_0(\beta) with .. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2}, where :math:`I_0` is the modified zeroth-order Bessel function. The Kaiser was named for Jim Kaiser, who discovered a simple approximation to the DPSS window based on Bessel functions. The Kaiser window is a very good approximation to the Digital Prolate Spheroidal Sequence, or Slepian window, which is the transform which maximizes the energy in the main lobe of the window relative to total energy. The Kaiser can approximate other windows by varying the beta parameter. (Some literature uses alpha = beta/pi.) [4]_ ==== ======================= beta Window shape ==== ======================= 0 Rectangular 5 Similar to a Hamming 6 Similar to a Hann 8.6 Similar to a Blackman ==== ======================= A beta value of 14 is probably a good starting point. Note that as beta gets large, the window narrows, and so the number of samples needs to be large enough to sample the increasingly narrow spike, otherwise NaNs will be returned. 
Most references to the Kaiser window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. John Wiley and Sons, New York, (1966). .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 177-178. .. [3] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function .. [4] F. J. Harris, "On the use of windows for harmonic analysis with the discrete Fourier transform," Proceedings of the IEEE, vol. 66, no. 1, pp. 51-83, Jan. 1978. :doi:`10.1109/PROC.1978.10837`. Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.kaiser(51, beta=14) >>> plt.plot(window) >>> plt.title(r"Kaiser window ($\beta$=14)") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ # Docstring adapted from NumPy's kaiser function if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(0, M) alpha = (M - 1) / 2.0 w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) / special.i0(beta)) return _truncate(w, needs_trunc) def kaiser_bessel_derived(M, beta, *, sym=True): """Return a Kaiser-Bessel derived window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. Note that this window is only defined for an even number of points. beta : float Kaiser window shape parameter. sym : bool, optional This parameter only exists to comply with the interface offered by the other window functions and to be callable by `get_window`. When True (default), generates a symmetric window, for use in filter design. Returns ------- w : ndarray The window, normalized to fulfil the Princen-Bradley condition. See Also -------- kaiser Notes ----- It is designed to be suitable for use with the modified discrete cosine transform (MDCT) and is mainly used in audio signal processing and audio coding. .. versionadded:: 1.9.0 References ---------- .. [1] Bosi, Marina, and Richard E. Goldberg. Introduction to Digital Audio Coding and Standards. Dordrecht: Kluwer, 2003. .. [2] Wikipedia, "Kaiser window", https://en.wikipedia.org/wiki/Kaiser_window Examples -------- Plot the Kaiser-Bessel derived window based on the wikipedia reference [2]_: >>> import numpy as np >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> N = 50 >>> for alpha in [0.64, 2.55, 7.64, 31.83]: ... ax.plot(signal.windows.kaiser_bessel_derived(2*N, np.pi*alpha), ... 
label=f"{alpha=}") >>> ax.grid(True) >>> ax.set_title("Kaiser-Bessel derived window") >>> ax.set_ylabel("Amplitude") >>> ax.set_xlabel("Sample") >>> ax.set_xticks([0, N, 2*N-1]) >>> ax.set_xticklabels(["0", "N", "2N+1"]) # doctest: +SKIP >>> ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.707, 0.8, 1.0]) >>> fig.legend(loc="center") >>> fig.tight_layout() >>> fig.show() """ if not sym: raise ValueError( "Kaiser-Bessel Derived windows are only defined for symmetric " "shapes" ) elif M < 1: return np.array([]) elif M % 2: raise ValueError( "Kaiser-Bessel Derived windows are only defined for even number " "of points" ) kaiser_window = kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiser_window) half_window = np.sqrt(csum[:-1] / csum[-1]) w = np.concatenate((half_window, half_window[::-1]), axis=0) return w def gaussian(M, std, sym=True): r"""Return a Gaussian window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. std : float The standard deviation, sigma. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Gaussian window is defined as .. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 } Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.gaussian(51, std=7) >>> plt.plot(window) >>> plt.title(r"Gaussian window ($\sigma$=7)") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(0, M) - (M - 1.0) / 2.0 sig2 = 2 * std * std w = np.exp(-n ** 2 / sig2) return _truncate(w, needs_trunc) def general_gaussian(M, p, sig, sym=True): r"""Return a window with a generalized Gaussian shape. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. p : float Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is the same shape as the Laplace distribution. sig : float The standard deviation, sigma. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The generalized Gaussian window is defined as .. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} } the half-power point is at .. 
math:: (2 \log(2))^{1/(2 p)} \sigma Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.general_gaussian(51, p=1.5, sig=7) >>> plt.plot(window) >>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(r"Freq. resp. of the gen. Gaussian " ... r"window (p=1.5, $\sigma$=7)") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) n = np.arange(0, M) - (M - 1.0) / 2.0 w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p)) return _truncate(w, needs_trunc) # `chebwin` contributed by Kumar Appaiah. def chebwin(M, at, sym=True): r"""Return a Dolph-Chebyshev window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. at : float Attenuation (in dB). sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value always normalized to 1 Notes ----- This window optimizes for the narrowest main lobe width for a given order `M` and sidelobe equiripple attenuation `at`, using Chebyshev polynomials. It was originally developed by Dolph to optimize the directionality of radio antenna arrays. Unlike most windows, the Dolph-Chebyshev is defined in terms of its frequency response: .. math:: W(k) = \frac {\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}} {\cosh[M \cosh^{-1}(\beta)]} where .. math:: \beta = \cosh \left [\frac{1}{M} \cosh^{-1}(10^\frac{A}{20}) \right ] and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`). The time domain window is then generated using the IFFT, so power-of-two `M` are the fastest to generate, and prime number `M` are the slowest. The equiripple condition in the frequency domain creates impulses in the time domain, which appear at the ends of the window. References ---------- .. [1] C. Dolph, "A current distribution for broadside arrays which optimizes the relationship between beam width and side-lobe level", Proceedings of the IEEE, Vol. 34, Issue 6 .. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter", American Meteorological Society (April 1997) http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf .. [3] F. J. Harris, "On the use of windows for harmonic analysis with the discrete Fourier transforms", Proceedings of the IEEE, Vol. 66, No. 
1, January 1978 Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.chebwin(51, at=100) >>> plt.plot(window) >>> plt.title("Dolph-Chebyshev window (100 dB)") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ if np.abs(at) < 45: warnings.warn("This window is not suitable for spectral analysis " "for attenuation values lower than about 45dB because " "the equivalent noise bandwidth of a Chebyshev window " "does not grow monotonically with increasing sidelobe " "attenuation when the attenuation is smaller than " "about 45 dB.") if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) # compute the parameter beta order = M - 1.0 beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.))) k = np.r_[0:M] * 1.0 x = beta * np.cos(np.pi * k / M) # Find the window's DFT coefficients # Use analytic definition of Chebyshev polynomial instead of expansion # from scipy.special. Using the expansion in scipy.special leads to errors. p = np.zeros(x.shape) p[x > 1] = np.cosh(order * np.arccosh(x[x > 1])) p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1])) p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1])) # Appropriate IDFT and filling up # depending on even/odd M if M % 2: w = np.real(sp_fft.fft(p)) n = (M + 1) // 2 w = w[:n] w = np.concatenate((w[n - 1:0:-1], w)) else: p = p * np.exp(1.j * np.pi / M * np.r_[0:M]) w = np.real(sp_fft.fft(p)) n = M // 2 + 1 w = np.concatenate((w[n - 1:0:-1], w[1:n])) w = w / max(w) return _truncate(w, needs_trunc) def cosine(M, sym=True): """Return a window with a simple cosine shape. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- .. 
versionadded:: 0.13.0 Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.cosine(51) >>> plt.plot(window) >>> plt.title("Cosine window") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2047) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the cosine window") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") >>> plt.show() """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) w = np.sin(np.pi / M * (np.arange(0, M) + .5)) return _truncate(w, needs_trunc) def exponential(M, center=None, tau=1., sym=True): r"""Return an exponential (or Poisson) window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. center : float, optional Parameter defining the center location of the window function. The default value if not given is ``center = (M-1) / 2``. This parameter must take its default value for symmetric windows. tau : float, optional Parameter defining the decay. For ``center = 0`` use ``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window remaining at the end. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Exponential window is defined as .. math:: w(n) = e^{-|n-center| / \tau} References ---------- .. [1] S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)", Technical Review 3, Bruel & Kjaer, 1987. Examples -------- Plot the symmetric window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> M = 51 >>> tau = 3.0 >>> window = signal.windows.exponential(M, tau=tau) >>> plt.plot(window) >>> plt.title("Exponential Window (tau=3.0)") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -35, 0]) >>> plt.title("Frequency response of the Exponential window (tau=3.0)") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") This function can also generate non-symmetric windows: >>> tau2 = -(M-1) / np.log(0.01) >>> window2 = signal.windows.exponential(M, 0, tau2, False) >>> plt.figure() >>> plt.plot(window2) >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") """ if sym and center is not None: raise ValueError("If sym==True, center must be None.") if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) if center is None: center = (M-1) / 2 n = np.arange(0, M) w = np.exp(-np.abs(n-center) / tau) return _truncate(w, needs_trunc) def taylor(M, nbar=4, sll=30, norm=True, sym=True): """ Return a Taylor window. 
The Taylor window taper function approximates the Dolph-Chebyshev window's constant sidelobe level for a parameterized number of near-in sidelobes, but then allows a taper beyond [2]_. The SAR (synthetic aperature radar) community commonly uses Taylor weighting for image formation processing because it provides strong, selectable sidelobe suppression with minimum broadening of the mainlobe [1]_. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. nbar : int, optional Number of nearly constant level sidelobes adjacent to the mainlobe. sll : float, optional Desired suppression of sidelobe level in decibels (dB) relative to the DC gain of the mainlobe. This should be a positive number. norm : bool, optional When True (default), divides the window by the largest (middle) value for odd-length windows or the value that would occur between the two repeated middle values for even-length windows such that all values are less than or equal to 1. When False the DC gain will remain at 1 (0 dB) and the sidelobes will be `sll` dB down. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- out : array The window. When `norm` is True (default), the maximum value is normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). See Also -------- chebwin, kaiser, bartlett, blackman, hamming, hann References ---------- .. [1] W. Carrara, R. Goodman, and R. Majewski, "Spotlight Synthetic Aperture Radar: Signal Processing Algorithms" Pages 512-513, July 1995. .. [2] Armin Doerry, "Catalog of Window Taper Functions for Sidelobe Control", 2017. https://www.researchgate.net/profile/Armin_Doerry/publication/316281181_Catalog_of_Window_Taper_Functions_for_Sidelobe_Control/links/58f92cb2a6fdccb121c9d54d/Catalog-of-Window-Taper-Functions-for-Sidelobe-Control.pdf Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.taylor(51, nbar=20, sll=100, norm=False) >>> plt.plot(window) >>> plt.title("Taylor window (100 dB)") >>> plt.ylabel("Amplitude") >>> plt.xlabel("Sample") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title("Frequency response of the Taylor window (100 dB)") >>> plt.ylabel("Normalized magnitude [dB]") >>> plt.xlabel("Normalized frequency [cycles per sample]") """ # noqa: E501 if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) # Original text uses a negative sidelobe level parameter and then negates # it in the calculation of B. To keep consistent with other methods we # assume the sidelobe level parameter to be positive. 
B = 10**(sll / 20) A = np.arccosh(B) / np.pi s2 = nbar**2 / (A**2 + (nbar - 0.5)**2) ma = np.arange(1, nbar) Fm = np.empty(nbar-1) signs = np.empty_like(ma) signs[::2] = 1 signs[1::2] = -1 m2 = ma*ma for mi, m in enumerate(ma): numer = signs[mi] * np.prod(1 - m2[mi]/s2/(A**2 + (ma - 0.5)**2)) denom = 2 * np.prod(1 - m2[mi]/m2[:mi]) * np.prod(1 - m2[mi]/m2[mi+1:]) Fm[mi] = numer / denom def W(n): return 1 + 2*np.dot(Fm, np.cos( 2*np.pi*ma[:, np.newaxis]*(n-M/2.+0.5)/M)) w = W(np.arange(M)) # normalize (Note that this is not described in the original text [1]) if norm: scale = 1.0 / W((M - 1) / 2) w *= scale return _truncate(w, needs_trunc) def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False): """ Compute the Discrete Prolate Spheroidal Sequences (DPSS). DPSS (or Slepian sequences) are often used in multitaper power spectral density estimation (see [1]_). The first window in the sequence can be used to maximize the energy concentration in the main lobe, and is also called the Slepian window. Parameters ---------- M : int Window length. NW : float Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*M*dt`` where ``dt`` is taken as 1. Kmax : int | None, optional Number of DPSS windows to return (orders ``0`` through ``Kmax-1``). If None (default), return only a single window of shape ``(M,)`` instead of an array of windows of shape ``(Kmax, M)``. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. norm : {2, 'approximate', 'subsample'} | None, optional If 'approximate' or 'subsample', then the windows are normalized by the maximum, and a correction scale-factor for even-length windows is applied either using ``M**2/(M**2+NW)`` ("approximate") or a FFT-based subsample shift ("subsample"), see Notes for details. If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise (which uses the l2 norm). return_ratios : bool, optional If True, also return the concentration ratios in addition to the windows. Returns ------- v : ndarray, shape (Kmax, M) or (M,) The DPSS windows. Will be 1D if `Kmax` is None. r : ndarray, shape (Kmax,) or float, optional The concentration ratios for the windows. Only returned if `return_ratios` evaluates to True. Will be 0D if `Kmax` is None. Notes ----- This computation uses the tridiagonal eigenvector formulation given in [2]_. The default normalization for ``Kmax=None``, i.e. window-generation mode, simply using the l-infinity norm would create a window with two unity values, which creates slight normalization differences between even and odd orders. The approximate correction of ``M**2/float(M**2+NW)`` for even sample numbers is used to counteract this effect (see Examples below). For very long signals (e.g., 1e6 elements), it can be useful to compute windows orders of magnitude shorter and use interpolation (e.g., `scipy.interpolate.interp1d`) to obtain tapers of length `M`, but this in general will not preserve orthogonality between the tapers. .. versionadded:: 1.1 References ---------- .. [1] Percival DB, Walden WT. Spectral Analysis for Physical Applications: Multitaper and Conventional Univariate Techniques. Cambridge University Press; 1993. .. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and uncertainty V: The discrete case. Bell System Technical Journal, Volume 57 (1978), 1371430. .. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for Spectrum Analysis. 
IEEE Transactions on Acoustics, Speech and Signal Processing. ASSP-28 (1): 105-107; 1980. Examples -------- We can compare the window to `kaiser`, which was invented as an alternative that was easier to calculate [3]_ (example adapted from `here <https://ccrma.stanford.edu/~jos/sasp/Kaiser_DPSS_Windows_Compared.html>`_): >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import windows, freqz >>> M = 51 >>> fig, axes = plt.subplots(3, 2, figsize=(5, 7)) >>> for ai, alpha in enumerate((1, 3, 5)): ... win_dpss = windows.dpss(M, alpha) ... beta = alpha*np.pi ... win_kaiser = windows.kaiser(M, beta) ... for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')): ... win /= win.sum() ... axes[ai, 0].plot(win, color=c, lw=1.) ... axes[ai, 0].set(xlim=[0, M-1], title=r'$\\alpha$ = %s' % alpha, ... ylabel='Amplitude') ... w, h = freqz(win) ... axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.) ... axes[ai, 1].set(xlim=[0, np.pi], ... title=r'$\\beta$ = %0.2f' % beta, ... ylabel='Magnitude (dB)') >>> for ax in axes.ravel(): ... ax.grid(True) >>> axes[2, 1].legend(['DPSS', 'Kaiser']) >>> fig.tight_layout() >>> plt.show() And here are examples of the first four windows, along with their concentration ratios: >>> M = 512 >>> NW = 2.5 >>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True) >>> fig, ax = plt.subplots(1) >>> ax.plot(win.T, linewidth=1.) >>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples', ... title='DPSS, M=%d, NW=%0.1f' % (M, NW)) >>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio) ... for ii, ratio in enumerate(eigvals)]) >>> fig.tight_layout() >>> plt.show() Using a standard :math:`l_{\\infty}` norm would produce two unity values for even `M`, but only one unity value for odd `M`. This produces uneven window power that can be counteracted by the approximate correction ``M**2/float(M**2+NW)``, which can be selected by using ``norm='approximate'`` (which is the same as ``norm=None`` when ``Kmax=None``, as is the case here). Alternatively, the slower ``norm='subsample'`` can be used, which uses subsample shifting in the frequency domain (FFT) to compute the correction: >>> Ms = np.arange(1, 41) >>> factors = (50, 20, 10, 5, 2.0001) >>> energy = np.empty((3, len(Ms), len(factors))) >>> for mi, M in enumerate(Ms): ... for fi, factor in enumerate(factors): ... NW = M / float(factor) ... # Corrected using empirical approximation (default) ... win = windows.dpss(M, NW) ... energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M) ... # Corrected using subsample shifting ... win = windows.dpss(M, NW, norm='subsample') ... energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M) ... # Uncorrected (using l-infinity norm) ... win /= win.max() ... energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M) >>> fig, ax = plt.subplots(1) >>> hs = ax.plot(Ms, energy[2], '-o', markersize=4, ... markeredgecolor='none') >>> leg = [hs[-1]] >>> for hi, hh in enumerate(hs): ... h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4, ... color=hh.get_color(), markeredgecolor='none', ... alpha=0.66) ... h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4, ... color=hh.get_color(), markeredgecolor='none', ... alpha=0.33) ... if hi == len(hs) - 1: ... leg.insert(0, h1[0]) ... leg.insert(0, h2[0]) >>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$') >>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$', ... 
'Corrected (subsample)']) >>> fig.tight_layout() """ # noqa: E501 if _len_guards(M): return np.ones(M) if norm is None: norm = 'approximate' if Kmax is None else 2 known_norms = (2, 'approximate', 'subsample') if norm not in known_norms: raise ValueError('norm must be one of %s, got %s' % (known_norms, norm)) if Kmax is None: singleton = True Kmax = 1 else: singleton = False Kmax = operator.index(Kmax) if not 0 < Kmax <= M: raise ValueError('Kmax must be greater than 0 and less than M') if NW >= M/2.: raise ValueError('NW must be less than M/2.') if NW <= 0: raise ValueError('NW must be positive') M, needs_trunc = _extend(M, sym) W = float(NW) / M nidx = np.arange(M) # Here we want to set up an optimization problem to find a sequence # whose energy is maximally concentrated within band [-W,W]. # Thus, the measure lambda(T,W) is the ratio between the energy within # that band, and the total energy. This leads to the eigen-system # (A - (l1)I)v = 0, where the eigenvector corresponding to the largest # eigenvalue is the sequence with maximally concentrated energy. The # collection of eigenvectors of this system are called Slepian # sequences, or discrete prolate spheroidal sequences (DPSS). Only the # first K, K = 2NW/dt orders of DPSS will exhibit good spectral # concentration # [see https://en.wikipedia.org/wiki/Spectral_concentration_problem] # Here we set up an alternative symmetric tri-diagonal eigenvalue # problem such that # (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1) # the main diagonal = ([M-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,M-1] # and the first off-diagonal = t(M-t)/2, t=[1,2,...,M-1] # [see Percival and Walden, 1993] d = ((M - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W) e = nidx[1:] * (M - nidx[1:]) / 2. # only calculate the highest Kmax eigenvalues w, windows = linalg.eigh_tridiagonal( d, e, select='i', select_range=(M - Kmax, M - 1)) w = w[::-1] windows = windows[:, ::-1].T # By convention (Percival and Walden, 1993 pg 379) # * symmetric tapers (k=0,2,4,...) should have a positive average. fix_even = (windows[::2].sum(axis=1) < 0) for i, f in enumerate(fix_even): if f: windows[2 * i] *= -1 # * antisymmetric tapers should begin with a positive lobe # (this depends on the definition of "lobe", here we'll take the first # point above the numerical noise, which should be good enough for # sufficiently smooth functions, and more robust than relying on an # algorithm that uses max(abs(w)), which is susceptible to numerical # noise problems) thresh = max(1e-7, 1. / M) for i, w in enumerate(windows[1::2]): if w[w * w > thresh][0] < 0: windows[2 * i + 1] *= -1 # Now find the eigenvalues of the original spectral concentration problem # Use the autocorr sequence technique from Percival and Walden, 1993 pg 390 if return_ratios: dpss_rxx = _fftautocorr(windows) r = 4 * W * np.sinc(2 * W * nidx) r[0] = 2 * W ratios = np.dot(dpss_rxx, r) if singleton: ratios = ratios[0] # Deal with sym and Kmax=None if norm != 2: windows /= windows.max() if M % 2 == 0: if norm == 'approximate': correction = M**2 / float(M**2 + NW) else: s = sp_fft.rfft(windows[0]) shift = -(1 - 1./M) * np.arange(1, M//2 + 1) s[1:] *= 2 * np.exp(-1j * np.pi * shift) correction = M / s.real.sum() windows *= correction # else we're already l2 normed, so do nothing if needs_trunc: windows = windows[:, :-1] if singleton: windows = windows[0] return (windows, ratios) if return_ratios else windows def lanczos(M, *, sym=True): r"""Return a Lanczos window also known as a sinc window. 
Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if `M` is even and `sym` is True). Notes ----- The Lanczos window is defined as .. math:: w(n) = sinc \left( \frac{2n}{M - 1} - 1 \right) where .. math:: sinc(x) = \frac{\sin(\pi x)}{\pi x} The Lanczos window has reduced Gibbs oscillations and is widely used for filtering climate timeseries with good properties in the physical and spectral domains. .. versionadded:: 1.10 References ---------- .. [1] Lanczos, C., and Teichmann, T. (1957). Applied analysis. Physics Today, 10, 44. .. [2] Duchon C. E. (1979) Lanczos Filtering in One and Two Dimensions. Journal of Applied Meteorology, Vol 18, pp 1016-1022. .. [3] Thomson, R. E. and Emery, W. J. (2014) Data Analysis Methods in Physical Oceanography (Third Edition), Elsevier, pp 593-637. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function Examples -------- Plot the window >>> import numpy as np >>> from scipy.signal.windows import lanczos >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1) >>> window = lanczos(51) >>> ax.plot(window) >>> ax.set_title("Lanczos window") >>> ax.set_ylabel("Amplitude") >>> ax.set_xlabel("Sample") >>> fig.tight_layout() >>> plt.show() and its frequency response: >>> fig, ax = plt.subplots(1) >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> ax.plot(freq, response) >>> ax.set_xlim(-0.5, 0.5) >>> ax.set_ylim(-120, 0) >>> ax.set_title("Frequency response of the lanczos window") >>> ax.set_ylabel("Normalized magnitude [dB]") >>> ax.set_xlabel("Normalized frequency [cycles per sample]") >>> fig.tight_layout() >>> plt.show() """ if _len_guards(M): return np.ones(M) M, needs_trunc = _extend(M, sym) # To make sure that the window is symmetric, we concatenate the right hand # half of the window and the flipped one which is the left hand half of # the window. def _calc_right_side_lanczos(n, m): return np.sinc(2. 
* np.arange(n, m) / (m - 1) - 1.0) if M % 2 == 0: wh = _calc_right_side_lanczos(M/2, M) w = np.r_[np.flip(wh), wh] else: wh = _calc_right_side_lanczos((M+1)/2, M) w = np.r_[np.flip(wh), 1.0, wh] return _truncate(w, needs_trunc) def _fftautocorr(x): """Compute the autocorrelation of a real array and crop the result.""" N = x.shape[-1] use_N = sp_fft.next_fast_len(2*N-1) x_fft = sp_fft.rfft(x, use_N, axis=-1) cxy = sp_fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N] # Or equivalently (but in most cases slower): # cxy = np.array([np.convolve(xx, yy[::-1], mode='full') # for xx, yy in zip(x, x)])[:, N-1:2*N-1] return cxy _win_equiv_raw = { ('barthann', 'brthan', 'bth'): (barthann, False), ('bartlett', 'bart', 'brt'): (bartlett, False), ('blackman', 'black', 'blk'): (blackman, False), ('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False), ('bohman', 'bman', 'bmn'): (bohman, False), ('boxcar', 'box', 'ones', 'rect', 'rectangular'): (boxcar, False), ('chebwin', 'cheb'): (chebwin, True), ('cosine', 'halfcosine'): (cosine, False), ('dpss',): (dpss, True), ('exponential', 'poisson'): (exponential, False), ('flattop', 'flat', 'flt'): (flattop, False), ('gaussian', 'gauss', 'gss'): (gaussian, True), ('general cosine', 'general_cosine'): (general_cosine, True), ('general gaussian', 'general_gaussian', 'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True), ('general hamming', 'general_hamming'): (general_hamming, True), ('hamming', 'hamm', 'ham'): (hamming, False), ('hann', 'han'): (hann, False), ('kaiser', 'ksr'): (kaiser, True), ('kaiser bessel derived', 'kbd'): (kaiser_bessel_derived, True), ('lanczos', 'sinc'): (lanczos, False), ('nuttall', 'nutl', 'nut'): (nuttall, False), ('parzen', 'parz', 'par'): (parzen, False), ('taylor', 'taylorwin'): (taylor, False), ('triangle', 'triang', 'tri'): (triang, False), ('tukey', 'tuk'): (tukey, False), } # Fill dict with all valid window name strings _win_equiv = {} for k, v in _win_equiv_raw.items(): for key in k: _win_equiv[key] = v[0] # Keep track of which windows need additional parameters _needs_param = set() for k, v in _win_equiv_raw.items(): if v[1]: _needs_param.update(k) def get_window(window, Nx, fftbins=True): """ Return a window of a given length and type. Parameters ---------- window : string, float, or tuple The type of window to create. See below for more details. Nx : int The number of samples in the window. fftbins : bool, optional If True (default), create a "periodic" window, ready to use with `ifftshift` and be multiplied by the result of an FFT (see also :func:`~scipy.fft.fftfreq`). If False, create a "symmetric" window, for use in filter design. 
Returns ------- get_window : ndarray Returns a window of length `Nx` and type `window` Notes ----- Window types: - `~scipy.signal.windows.boxcar` - `~scipy.signal.windows.triang` - `~scipy.signal.windows.blackman` - `~scipy.signal.windows.hamming` - `~scipy.signal.windows.hann` - `~scipy.signal.windows.bartlett` - `~scipy.signal.windows.flattop` - `~scipy.signal.windows.parzen` - `~scipy.signal.windows.bohman` - `~scipy.signal.windows.blackmanharris` - `~scipy.signal.windows.nuttall` - `~scipy.signal.windows.barthann` - `~scipy.signal.windows.cosine` - `~scipy.signal.windows.exponential` - `~scipy.signal.windows.tukey` - `~scipy.signal.windows.taylor` - `~scipy.signal.windows.lanczos` - `~scipy.signal.windows.kaiser` (needs beta) - `~scipy.signal.windows.kaiser_bessel_derived` (needs beta) - `~scipy.signal.windows.gaussian` (needs standard deviation) - `~scipy.signal.windows.general_cosine` (needs weighting coefficients) - `~scipy.signal.windows.general_gaussian` (needs power, width) - `~scipy.signal.windows.general_hamming` (needs window coefficient) - `~scipy.signal.windows.dpss` (needs normalized half-bandwidth) - `~scipy.signal.windows.chebwin` (needs attenuation) If the window requires no parameters, then `window` can be a string. If the window requires parameters, then `window` must be a tuple with the first argument the string name of the window, and the next arguments the needed parameters. If `window` is a floating point number, it is interpreted as the beta parameter of the `~scipy.signal.windows.kaiser` window. Each of the window types listed above is also the name of a function that can be called directly to create a window of that type. Examples -------- >>> from scipy import signal >>> signal.get_window('triang', 7) array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375]) >>> signal.get_window(('kaiser', 4.0), 9) array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093, 0.97885093, 0.82160913, 0.56437221, 0.29425961]) >>> signal.get_window(('exponential', None, 1.), 9) array([ 0.011109 , 0.03019738, 0.082085 , 0.22313016, 0.60653066, 0.60653066, 0.22313016, 0.082085 , 0.03019738]) >>> signal.get_window(4.0, 9) array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093, 0.97885093, 0.82160913, 0.56437221, 0.29425961]) """ sym = not fftbins try: beta = float(window) except (TypeError, ValueError) as e: args = () if isinstance(window, tuple): winstr = window[0] if len(window) > 1: args = window[1:] elif isinstance(window, str): if window in _needs_param: raise ValueError("The '" + window + "' window needs one or " "more parameters -- pass a tuple.") from e else: winstr = window else: raise ValueError("%s as window type is not supported." % str(type(window))) from e try: winfunc = _win_equiv[winstr] except KeyError as e: raise ValueError("Unknown window type.") from e if winfunc is dpss: params = (Nx,) + args + (None,) else: params = (Nx,) + args else: winfunc = kaiser params = (Nx, beta) return winfunc(*params, sym=sym)
83,617
34.207579
227
py
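
The file above implements the full scipy.signal.windows suite. A minimal sketch (assuming only that SciPy and NumPy are installed) that checks two properties stated in those docstrings: tukey degenerates to a rectangular window for alpha <= 0 and to a Hann window for alpha >= 1, and hamming is the alpha = 0.54 special case of general_hamming; sym=False requests the periodic variant used for spectral analysis.

import numpy as np
from scipy.signal import windows

M = 51
# tukey(alpha=0) is rectangular and tukey(alpha=1) is a Hann window, as documented.
assert np.allclose(windows.tukey(M, alpha=0.0), windows.boxcar(M))
assert np.allclose(windows.tukey(M, alpha=1.0), windows.hann(M))
# hamming is general_hamming evaluated at alpha = 0.54.
assert np.allclose(windows.hamming(M), windows.general_hamming(M, 0.54))
# sym=False returns the DFT-even ("periodic") variant of the same length.
print(windows.hamming(M, sym=False).shape)  # (51,)
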
scipy
scipy-main/scipy/signal/windows/windows.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.signal.windows` namespace for importing the functions
# included below.

import warnings
from . import _windows

__all__ = [  # noqa: F822
    'boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
    'blackmanharris', 'flattop', 'bartlett', 'barthann', 'hamming', 'kaiser',
    'gaussian', 'general_cosine', 'general_gaussian', 'general_hamming',
    'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor', 'dpss',
    'get_window', 'linalg', 'sp_fft', 'k', 'v', 'key'
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.signal.windows.windows is deprecated and has no attribute "
            f"{name}. Try looking in scipy.signal.windows instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.signal.windows` namespace, "
                  "the `scipy.signal.windows.windows` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_windows, name)
1,117
32.878788
84
py
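
In SciPy releases that still ship the shim above (it is slated for removal in v2.0.0), looking up a window function through the old scipy.signal.windows.windows module goes through the module-level __getattr__, which emits a DeprecationWarning and forwards to the real implementation. A small demonstration, hedged on such a release being installed:

import warnings
from scipy.signal import windows as new_ns
from scipy.signal.windows import windows as old_ns  # deprecated shim module

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    w = old_ns.hamming(8)  # resolved via the shim's __getattr__
assert any(issubclass(c.category, DeprecationWarning) for c in caught)
assert (w == new_ns.hamming(8)).all()
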
scipy
scipy-main/scipy/signal/windows/__init__.py
""" Window functions (:mod:`scipy.signal.windows`) ============================================== The suite of window functions for filtering and spectral estimation. .. currentmodule:: scipy.signal.windows .. autosummary:: :toctree: generated/ get_window -- Return a window of a given length and type. barthann -- Bartlett-Hann window bartlett -- Bartlett window blackman -- Blackman window blackmanharris -- Minimum 4-term Blackman-Harris window bohman -- Bohman window boxcar -- Boxcar window chebwin -- Dolph-Chebyshev window cosine -- Cosine window dpss -- Discrete prolate spheroidal sequences exponential -- Exponential window flattop -- Flat top window gaussian -- Gaussian window general_cosine -- Generalized Cosine window general_gaussian -- Generalized Gaussian window general_hamming -- Generalized Hamming window hamming -- Hamming window hann -- Hann window kaiser -- Kaiser window kaiser_bessel_derived -- Kaiser-Bessel derived window lanczos -- Lanczos window also known as a sinc window nuttall -- Nuttall's minimum 4-term Blackman-Harris window parzen -- Parzen window taylor -- Taylor window triang -- Triangular window tukey -- Tukey window """ from ._windows import * # Deprecated namespaces, to be removed in v2.0.0 from . import windows __all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall', 'blackmanharris', 'flattop', 'bartlett', 'barthann', 'hamming', 'kaiser', 'kaiser_bessel_derived', 'gaussian', 'general_gaussian', 'general_cosine', 'general_hamming', 'chebwin', 'cosine', 'hann', 'exponential', 'tukey', 'taylor', 'get_window', 'dpss', 'lanczos']
2,119
39
77
py
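
Every window listed in that module docstring can be built either by calling the function directly or by name through get_window; a short sketch of the equivalence (note that fftbins=True in get_window corresponds to sym=False in the window functions):

import numpy as np
from scipy.signal import windows

# By-name and direct construction agree.
assert np.allclose(windows.get_window('blackmanharris', 64, fftbins=True),
                   windows.blackmanharris(64, sym=False))
# Windows that need extra parameters are requested as a (name, params...) tuple.
assert np.allclose(windows.get_window(('kaiser', 8.6), 64),
                   windows.kaiser(64, 8.6, sym=False))
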
scipy
scipy-main/scipy/ndimage/measurements.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

import warnings
from . import _measurements

__all__ = [  # noqa: F822
    'label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
    'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
    'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
    'histogram', 'watershed_ift', 'sum_labels'
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.ndimage.measurements is deprecated and has no attribute "
            f"{name}. Try looking in scipy.ndimage instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
                  "the `scipy.ndimage.measurements` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_measurements, name)
1,015
29.787879
78
py
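
The names re-exported by that shim live in scipy.ndimage. A minimal sketch of three of them (label, find_objects and sum_labels) on a small binary image, just to show what the deprecated namespace forwards to:

import numpy as np
from scipy import ndimage

a = np.array([[0, 1, 1, 0],
              [0, 0, 0, 0],
              [1, 0, 0, 1]])
labeled, num = ndimage.label(a)          # default cross-shaped connectivity
print(num)                               # 3 connected features
print(ndimage.find_objects(labeled)[0])  # bounding slices of feature 1
print(ndimage.sum_labels(a, labeled, index=[1, 2, 3]))  # pixels per feature
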
scipy
scipy-main/scipy/ndimage/_fourier.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy from scipy._lib._util import normalize_axis_index from . import _ni_support from . import _nd_image __all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid', 'fourier_shift'] def _get_output_fourier(output, input): if output is None: if input.dtype.type in [numpy.complex64, numpy.complex128, numpy.float32]: output = numpy.zeros(input.shape, dtype=input.dtype) else: output = numpy.zeros(input.shape, dtype=numpy.float64) elif type(output) is type: if output not in [numpy.complex64, numpy.complex128, numpy.float32, numpy.float64]: raise RuntimeError("output type not supported") output = numpy.zeros(input.shape, dtype=output) elif output.shape != input.shape: raise RuntimeError("output shape not correct") return output def _get_output_fourier_complex(output, input): if output is None: if input.dtype.type in [numpy.complex64, numpy.complex128]: output = numpy.zeros(input.shape, dtype=input.dtype) else: output = numpy.zeros(input.shape, dtype=numpy.complex128) elif type(output) is type: if output not in [numpy.complex64, numpy.complex128]: raise RuntimeError("output type not supported") output = numpy.zeros(input.shape, dtype=output) elif output.shape != input.shape: raise RuntimeError("output shape not correct") return output def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None): """ Multidimensional Gaussian fourier filter. The array is multiplied with the fourier transform of a Gaussian kernel. Parameters ---------- input : array_like The input array. sigma : float or sequence The sigma of the Gaussian kernel. If a float, `sigma` is the same for all axes. If a sequence, `sigma` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the input is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the input is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. output : ndarray, optional If given, the result of filtering the input is placed in this array. 
Returns ------- fourier_gaussian : ndarray The filtered input. Examples -------- >>> from scipy import ndimage, datasets >>> import numpy.fft >>> import matplotlib.pyplot as plt >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = datasets.ascent() >>> input_ = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_gaussian(input_, sigma=4) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show() """ input = numpy.asarray(input) output = _get_output_fourier(output, input) axis = normalize_axis_index(axis, input.ndim) sigmas = _ni_support._normalize_sequence(sigma, input.ndim) sigmas = numpy.asarray(sigmas, dtype=numpy.float64) if not sigmas.flags.contiguous: sigmas = sigmas.copy() _nd_image.fourier_filter(input, sigmas, n, axis, output, 0) return output def fourier_uniform(input, size, n=-1, axis=-1, output=None): """ Multidimensional uniform fourier filter. The array is multiplied with the Fourier transform of a box of given size. Parameters ---------- input : array_like The input array. size : float or sequence The size of the box used for filtering. If a float, `size` is the same for all axes. If a sequence, `size` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the input is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the input is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. output : ndarray, optional If given, the result of filtering the input is placed in this array. Returns ------- fourier_uniform : ndarray The filtered input. Examples -------- >>> from scipy import ndimage, datasets >>> import numpy.fft >>> import matplotlib.pyplot as plt >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = datasets.ascent() >>> input_ = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_uniform(input_, size=20) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show() """ input = numpy.asarray(input) output = _get_output_fourier(output, input) axis = normalize_axis_index(axis, input.ndim) sizes = _ni_support._normalize_sequence(size, input.ndim) sizes = numpy.asarray(sizes, dtype=numpy.float64) if not sizes.flags.contiguous: sizes = sizes.copy() _nd_image.fourier_filter(input, sizes, n, axis, output, 1) return output def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None): """ Multidimensional ellipsoid Fourier filter. The array is multiplied with the fourier transform of an ellipsoid of given sizes. Parameters ---------- input : array_like The input array. size : float or sequence The size of the box used for filtering. If a float, `size` is the same for all axes. If a sequence, `size` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the input is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the input is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. output : ndarray, optional If given, the result of filtering the input is placed in this array. 
Returns ------- fourier_ellipsoid : ndarray The filtered input. Notes ----- This function is implemented for arrays of rank 1, 2, or 3. Examples -------- >>> from scipy import ndimage, datasets >>> import numpy.fft >>> import matplotlib.pyplot as plt >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = datasets.ascent() >>> input_ = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_ellipsoid(input_, size=20) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show() """ input = numpy.asarray(input) if input.ndim > 3: raise NotImplementedError("Only 1d, 2d and 3d inputs are supported") output = _get_output_fourier(output, input) if output.size == 0: # The C code has a bug that can result in a segfault with arrays # that have size 0 (gh-17270), so check here. return output axis = normalize_axis_index(axis, input.ndim) sizes = _ni_support._normalize_sequence(size, input.ndim) sizes = numpy.asarray(sizes, dtype=numpy.float64) if not sizes.flags.contiguous: sizes = sizes.copy() _nd_image.fourier_filter(input, sizes, n, axis, output, 2) return output def fourier_shift(input, shift, n=-1, axis=-1, output=None): """ Multidimensional Fourier shift filter. The array is multiplied with the Fourier transform of a shift operation. Parameters ---------- input : array_like The input array. shift : float or sequence The size of the box used for filtering. If a float, `shift` is the same for all axes. If a sequence, `shift` has to contain one value for each axis. n : int, optional If `n` is negative (default), then the input is assumed to be the result of a complex fft. If `n` is larger than or equal to zero, the input is assumed to be the result of a real fft, and `n` gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. output : ndarray, optional If given, the result of shifting the input is placed in this array. Returns ------- fourier_shift : ndarray The shifted input. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> import numpy.fft >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = datasets.ascent() >>> input_ = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_shift(input_, shift=200) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show() """ input = numpy.asarray(input) output = _get_output_fourier_complex(output, input) axis = normalize_axis_index(axis, input.ndim) shifts = _ni_support._normalize_sequence(shift, input.ndim) shifts = numpy.asarray(shifts, dtype=numpy.float64) if not shifts.flags.contiguous: shifts = shifts.copy() _nd_image.fourier_shift(input, shifts, n, axis, output) return output
11,385
35.967532
78
py
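
A quick way to see what these frequency-domain filters do is to compare fourier_gaussian against the spatial-domain gaussian_filter with periodic boundaries. A sketch under that assumption; the tiny residual comes from sampling and truncating the spatial kernel:

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
img = rng.random((64, 64))

# Blur entirely in the frequency domain: FFT, multiply by a Gaussian, inverse FFT.
blurred_freq = np.fft.ifft2(ndimage.fourier_gaussian(np.fft.fft2(img), sigma=3)).real
# Equivalent circular (wrap-around) blur computed in the spatial domain.
blurred_spatial = ndimage.gaussian_filter(img, sigma=3, mode='wrap')
print(np.max(np.abs(blurred_freq - blurred_spatial)))  # small
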
scipy
scipy-main/scipy/ndimage/setup.py
import os
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
from scipy._build_utils import numpy_nodepr_api


def configuration(parent_package='', top_path=None):
    config = Configuration('ndimage', parent_package, top_path)

    include_dirs = ['src',
                    get_include(),
                    os.path.join(os.path.dirname(__file__), '..', '_lib', 'src')]

    config.add_extension("_nd_image",
                         sources=["src/nd_image.c",
                                  "src/ni_filters.c",
                                  "src/ni_fourier.c",
                                  "src/ni_interpolation.c",
                                  "src/ni_measure.c",
                                  "src/ni_morphology.c",
                                  "src/ni_splines.c",
                                  "src/ni_support.c"],
                         include_dirs=include_dirs,
                         **numpy_nodepr_api)

    # Cython wants the .c and .pyx to have the underscore.
    config.add_extension("_ni_label",
                         sources=["src/_ni_label.c",],
                         include_dirs=['src'] + [get_include()])

    config.add_extension("_ctest",
                         sources=["src/_ctest.c"],
                         include_dirs=[get_include()],
                         **numpy_nodepr_api)

    config.add_extension("_cytest", sources=["src/_cytest.c"])

    config.add_data_dir('tests')

    return config


if __name__ == '__main__':
    setup(**configuration(top_path='').todict())
1,474
29.102041
81
py
scipy
scipy-main/scipy/ndimage/_measurements.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy import numpy as np from . import _ni_support from . import _ni_label from . import _nd_image from . import _morphology __all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean', 'variance', 'standard_deviation', 'minimum', 'maximum', 'median', 'minimum_position', 'maximum_position', 'extrema', 'center_of_mass', 'histogram', 'watershed_ift', 'sum_labels', 'value_indices'] def label(input, structure=None, output=None): """ Label features in an array. Parameters ---------- input : array_like An array-like object to be labeled. Any non-zero values in `input` are counted as features and zero values are considered the background. structure : array_like, optional A structuring element that defines feature connections. `structure` must be centrosymmetric (see Notes). If no structuring element is provided, one is automatically generated with a squared connectivity equal to one. That is, for a 2-D `input` array, the default structuring element is:: [[0,1,0], [1,1,1], [0,1,0]] output : (None, data-type, array_like), optional If `output` is a data type, it specifies the type of the resulting labeled feature array. If `output` is an array-like object, then `output` will be updated with the labeled features from this function. This function can operate in-place, by passing output=input. Note that the output must be able to store the largest label, or this function will raise an Exception. Returns ------- label : ndarray or int An integer ndarray where each unique feature in `input` has a unique label in the returned array. num_features : int How many objects were found. If `output` is None, this function returns a tuple of (`labeled_array`, `num_features`). If `output` is a ndarray, then it will be updated with values in `labeled_array` and only `num_features` will be returned by this function. See Also -------- find_objects : generate a list of slices for the labeled features (or objects); useful for finding features' position or dimensions Notes ----- A centrosymmetric matrix is a matrix that is symmetric about the center. See [1]_ for more information. 
The `structure` matrix must be centrosymmetric to ensure two-way connections. For instance, if the `structure` matrix is not centrosymmetric and is defined as:: [[0,1,0], [1,1,0], [0,0,0]] and the `input` is:: [[1,2], [0,3]] then the structure matrix would indicate the entry 2 in the input is connected to 1, but 1 is not connected to 2. References ---------- .. [1] James R. Weaver, "Centrosymmetric (cross-symmetric) matrices, their basic properties, eigenvalues, and eigenvectors." The American Mathematical Monthly 92.10 (1985): 711-717. Examples -------- Create an image with some features, then label it using the default (cross-shaped) structuring element: >>> from scipy.ndimage import label, generate_binary_structure >>> import numpy as np >>> a = np.array([[0,0,1,1,0,0], ... [0,0,0,1,0,0], ... [1,1,0,0,1,0], ... [0,0,0,1,0,0]]) >>> labeled_array, num_features = label(a) Each of the 4 features are labeled with a different integer: >>> num_features 4 >>> labeled_array array([[0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0], [2, 2, 0, 0, 3, 0], [0, 0, 0, 4, 0, 0]]) Generate a structuring element that will consider features connected even if they touch diagonally: >>> s = generate_binary_structure(2,2) or, >>> s = [[1,1,1], ... [1,1,1], ... [1,1,1]] Label the image using the new structuring element: >>> labeled_array, num_features = label(a, structure=s) Show the 2 labeled features (note that features 1, 3, and 4 from above are now considered a single feature): >>> num_features 2 >>> labeled_array array([[0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0], [2, 2, 0, 0, 1, 0], [0, 0, 0, 1, 0, 0]]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if structure is None: structure = _morphology.generate_binary_structure(input.ndim, 1) structure = numpy.asarray(structure, dtype=bool) if structure.ndim != input.ndim: raise RuntimeError('structure and input must have equal rank') for ii in structure.shape: if ii != 3: raise ValueError('structure dimensions must be equal to 3') # Use 32 bits if it's large enough for this image. # _ni_label.label() needs two entries for background and # foreground tracking need_64bits = input.size >= (2**31 - 2) if isinstance(output, numpy.ndarray): if output.shape != input.shape: raise ValueError("output shape not correct") caller_provided_output = True else: caller_provided_output = False if output is None: output = np.empty(input.shape, np.intp if need_64bits else np.int32) else: output = np.empty(input.shape, output) # handle scalars, 0-D arrays if input.ndim == 0 or input.size == 0: if input.ndim == 0: # scalar maxlabel = 1 if (input != 0) else 0 output[...] = maxlabel else: # 0-D maxlabel = 0 if caller_provided_output: return maxlabel else: return output, maxlabel try: max_label = _ni_label._label(input, structure, output) except _ni_label.NeedMoreBits as e: # Make another attempt with enough bits, then try to cast to the # new type. tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32) max_label = _ni_label._label(input, structure, tmp_output) output[...] = tmp_output[...] if not np.all(output == tmp_output): # refuse to return bad results raise RuntimeError( "insufficient bit-depth in requested output type" ) from e if caller_provided_output: # result was written in-place return max_label else: return output, max_label def find_objects(input, max_label=0): """ Find objects in a labeled array. Parameters ---------- input : ndarray of ints Array containing objects defined by different labels. 
Labels with value 0 are ignored. max_label : int, optional Maximum label to be searched for in `input`. If max_label is not given, the positions of all objects are returned. Returns ------- object_slices : list of tuples A list of tuples, with each tuple containing N slices (with N the dimension of the input array). Slices correspond to the minimal parallelepiped that contains the object. If a number is missing, None is returned instead of a slice. The label ``l`` corresponds to the index ``l-1`` in the returned list. See Also -------- label, center_of_mass Notes ----- This function is very useful for isolating a volume of interest inside a 3-D array, that cannot be "seen through". Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((6,6), dtype=int) >>> a[2:4, 2:4] = 1 >>> a[4, 4] = 1 >>> a[:2, :3] = 2 >>> a[0, 5] = 3 >>> a array([[2, 2, 2, 0, 0, 3], [2, 2, 2, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]]) >>> ndimage.find_objects(a) [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None)), (slice(0, 1, None), slice(5, 6, None))] >>> ndimage.find_objects(a, max_label=2) [(slice(2, 5, None), slice(2, 5, None)), (slice(0, 2, None), slice(0, 3, None))] >>> ndimage.find_objects(a == 1, max_label=2) [(slice(2, 5, None), slice(2, 5, None)), None] >>> loc = ndimage.find_objects(a)[0] >>> a[loc] array([[1, 1, 0], [1, 1, 0], [0, 0, 1]]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if max_label < 1: max_label = input.max() return _nd_image.find_objects(input, max_label) def value_indices(arr, *, ignore_value=None): """ Find indices of each distinct value in given array. Parameters ---------- arr : ndarray of ints Array containing integer values. ignore_value : int, optional This value will be ignored in searching the `arr` array. If not given, all values found will be included in output. Default is None. Returns ------- indices : dictionary A Python dictionary of array indices for each distinct value. The dictionary is keyed by the distinct values, the entries are array index tuples covering all occurrences of the value within the array. This dictionary can occupy significant memory, usually several times the size of the input array. See Also -------- label, maximum, median, minimum_position, extrema, sum, mean, variance, standard_deviation, numpy.where, numpy.unique Notes ----- For a small array with few distinct values, one might use `numpy.unique()` to find all possible values, and ``(arr == val)`` to locate each value within that array. However, for large arrays, with many distinct values, this can become extremely inefficient, as locating each value would require a new search through the entire array. Using this function, there is essentially one search, with the indices saved for all distinct values. This is useful when matching a categorical image (e.g. a segmentation or classification) to an associated image of other data, allowing any per-class statistic(s) to then be calculated. Provides a more flexible alternative to functions like ``scipy.ndimage.mean()`` and ``scipy.ndimage.variance()``. Some other closely related functionality, with different strengths and weaknesses, can also be found in ``scipy.stats.binned_statistic()`` and the `scikit-image <https://scikit-image.org/>`_ function ``skimage.measure.regionprops()``. 
Note for IDL users: this provides functionality equivalent to IDL's REVERSE_INDICES option (as per the IDL documentation for the `HISTOGRAM <https://www.l3harrisgeospatial.com/docs/histogram.html>`_ function). .. versionadded:: 1.10.0 Examples -------- >>> import numpy as np >>> from scipy import ndimage >>> a = np.zeros((6, 6), dtype=int) >>> a[2:4, 2:4] = 1 >>> a[4, 4] = 1 >>> a[:2, :3] = 2 >>> a[0, 5] = 3 >>> a array([[2, 2, 2, 0, 0, 3], [2, 2, 2, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]]) >>> val_indices = ndimage.value_indices(a) The dictionary `val_indices` will have an entry for each distinct value in the input array. >>> val_indices.keys() dict_keys([0, 1, 2, 3]) The entry for each value is an index tuple, locating the elements with that value. >>> ndx1 = val_indices[1] >>> ndx1 (array([2, 2, 3, 3, 4]), array([2, 3, 2, 3, 4])) This can be used to index into the original array, or any other array with the same shape. >>> a[ndx1] array([1, 1, 1, 1, 1]) If the zeros were to be ignored, then the resulting dictionary would no longer have an entry for zero. >>> val_indices = ndimage.value_indices(a, ignore_value=0) >>> val_indices.keys() dict_keys([1, 2, 3]) """ # Cope with ignore_value being None, without too much extra complexity # in the C code. If not None, the value is passed in as a numpy array # with the same dtype as arr. ignore_value_arr = numpy.zeros((1,), dtype=arr.dtype) ignoreIsNone = (ignore_value is None) if not ignoreIsNone: ignore_value_arr[0] = ignore_value_arr.dtype.type(ignore_value) val_indices = _nd_image.value_indices(arr, ignoreIsNone, ignore_value_arr) return val_indices def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False): """ Roughly equivalent to [func(input[labels == i]) for i in index]. Sequentially applies an arbitrary function (that works on array_like input) to subsets of an N-D image array specified by `labels` and `index`. The option exists to provide the function with positional parameters as the second argument. Parameters ---------- input : array_like Data from which to select `labels` to process. labels : array_like or None Labels to objects in `input`. If not None, array must be same shape as `input`. If None, `func` is applied to raveled `input`. index : int, sequence of ints or None Subset of `labels` to which to apply `func`. If a scalar, a single value is returned. If None, `func` is applied to all non-zero values of `labels`. func : callable Python function to apply to `labels` from `input`. out_dtype : dtype Dtype to use for `result`. default : int, float or None Default return value when a element of `index` does not exist in `labels`. pass_positions : bool, optional If True, pass linear indices to `func` as a second argument. Default is False. Returns ------- result : ndarray Result of applying `func` to each of `labels` to `input` in `index`. Examples -------- >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> from scipy import ndimage >>> lbl, nlbl = ndimage.label(a) >>> lbls = np.arange(1, nlbl+1) >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, 0) array([ 2.75, 5.5 , 6. ]) Falling back to `default`: >>> lbls = np.arange(1, nlbl+2) >>> ndimage.labeled_comprehension(a, lbl, lbls, np.mean, float, -1) array([ 2.75, 5.5 , 6. , -1. ]) Passing positions: >>> def fn(val, pos): ... print("fn says: %s : %s" % (val, pos)) ... 
return (val.sum()) if (pos.sum() % 2 == 0) else (-val.sum()) ... >>> ndimage.labeled_comprehension(a, lbl, lbls, fn, float, 0, True) fn says: [1 2 5 3] : [0 1 4 5] fn says: [4 7] : [ 7 11] fn says: [9 3] : [12 13] array([ 11., 11., -12., 0.]) """ as_scalar = numpy.isscalar(index) input = numpy.asarray(input) if pass_positions: positions = numpy.arange(input.size).reshape(input.shape) if labels is None: if index is not None: raise ValueError("index without defined labels") if not pass_positions: return func(input.ravel()) else: return func(input.ravel(), positions.ravel()) try: input, labels = numpy.broadcast_arrays(input, labels) except ValueError as e: raise ValueError("input and labels must have the same shape " "(excepting dimensions with width 1)") from e if index is None: if not pass_positions: return func(input[labels > 0]) else: return func(input[labels > 0], positions[labels > 0]) index = numpy.atleast_1d(index) if np.any(index.astype(labels.dtype).astype(index.dtype) != index): raise ValueError("Cannot convert index values from <%s> to <%s> " "(labels' type) without loss of precision" % (index.dtype, labels.dtype)) index = index.astype(labels.dtype) # optimization: find min/max in index, and select those parts of labels, input, and positions lo = index.min() hi = index.max() mask = (labels >= lo) & (labels <= hi) # this also ravels the arrays labels = labels[mask] input = input[mask] if pass_positions: positions = positions[mask] # sort everything by labels label_order = labels.argsort() labels = labels[label_order] input = input[label_order] if pass_positions: positions = positions[label_order] index_order = index.argsort() sorted_index = index[index_order] def do_map(inputs, output): """labels must be sorted""" nidx = sorted_index.size # Find boundaries for each stretch of constant labels # This could be faster, but we already paid N log N to sort labels. lo = numpy.searchsorted(labels, sorted_index, side='left') hi = numpy.searchsorted(labels, sorted_index, side='right') for i, l, h in zip(range(nidx), lo, hi): if l == h: continue output[i] = func(*[inp[l:h] for inp in inputs]) temp = numpy.empty(index.shape, out_dtype) temp[:] = default if not pass_positions: do_map([input], temp) else: do_map([input, positions], temp) output = numpy.zeros(index.shape, out_dtype) output[index_order] = temp if as_scalar: output = output[0] return output def _safely_castable_to_int(dt): """Test whether the NumPy data type `dt` can be safely cast to an int.""" int_size = np.dtype(int).itemsize safe = ((np.issubdtype(dt, np.signedinteger) and dt.itemsize <= int_size) or (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)) return safe def _stats(input, labels=None, index=None, centered=False): """Count, sum, and optionally compute (sum - centre)^2 of input by label Parameters ---------- input : array_like, N-D The input data to be analyzed. labels : array_like (N-D), optional The labels of the data in `input`. This array must be broadcast compatible with `input`; typically, it is the same shape as `input`. If `labels` is None, all nonzero values in `input` are treated as the single labeled group. index : label or sequence of labels, optional These are the labels of the groups for which the stats are computed. If `index` is None, the stats are computed for the single group where `labels` is greater than 0. centered : bool, optional If True, the centered sum of squares for each labeled group is also returned. Default is False. 
Returns ------- counts : int or ndarray of ints The number of elements in each labeled group. sums : scalar or ndarray of scalars The sums of the values in each labeled group. sums_c : scalar or ndarray of scalars, optional The sums of mean-centered squares of the values in each labeled group. This is only returned if `centered` is True. """ def single_group(vals): if centered: vals_c = vals - vals.mean() return vals.size, vals.sum(), (vals_c * vals_c.conjugate()).sum() else: return vals.size, vals.sum() if labels is None: return single_group(input) # ensure input and labels match sizes input, labels = numpy.broadcast_arrays(input, labels) if index is None: return single_group(input[labels > 0]) if numpy.isscalar(index): return single_group(input[labels == index]) def _sum_centered(labels): # `labels` is expected to be an ndarray with the same shape as `input`. # It must contain the label indices (which are not necessarily the labels # themselves). means = sums / counts centered_input = input - means[labels] # bincount expects 1-D inputs, so we ravel the arguments. bc = numpy.bincount(labels.ravel(), weights=(centered_input * centered_input.conjugate()).ravel()) return bc # Remap labels to unique integers if necessary, or if the largest # label is larger than the number of values. if (not _safely_castable_to_int(labels.dtype) or labels.min() < 0 or labels.max() > labels.size): # Use numpy.unique to generate the label indices. `new_labels` will # be 1-D, but it should be interpreted as the flattened N-D array of # label indices. unique_labels, new_labels = numpy.unique(labels, return_inverse=True) counts = numpy.bincount(new_labels) sums = numpy.bincount(new_labels, weights=input.ravel()) if centered: # Compute the sum of the mean-centered squares. # We must reshape new_labels to the N-D shape of `input` before # passing it _sum_centered. sums_c = _sum_centered(new_labels.reshape(labels.shape)) idxs = numpy.searchsorted(unique_labels, index) # make all of idxs valid idxs[idxs >= unique_labels.size] = 0 found = (unique_labels[idxs] == index) else: # labels are an integer type allowed by bincount, and there aren't too # many, so call bincount directly. counts = numpy.bincount(labels.ravel()) sums = numpy.bincount(labels.ravel(), weights=input.ravel()) if centered: sums_c = _sum_centered(labels) # make sure all index values are valid idxs = numpy.asanyarray(index, numpy.int_).copy() found = (idxs >= 0) & (idxs < counts.size) idxs[~found] = 0 counts = counts[idxs] counts[~found] = 0 sums = sums[idxs] sums[~found] = 0 if not centered: return (counts, sums) else: sums_c = sums_c[idxs] sums_c[~found] = 0 return (counts, sums, sums_c) def sum(input, labels=None, index=None): """ Calculate the sum of the values of the array. Notes ----- This is an alias for `ndimage.sum_labels` kept for backwards compatibility reasons, for new code please prefer `sum_labels`. See the `sum_labels` docstring for more details. """ return sum_labels(input, labels, index) def sum_labels(input, labels=None, index=None): """ Calculate the sum of the values of the array. Parameters ---------- input : array_like Values of `input` inside the regions defined by `labels` are summed together. labels : array_like of ints, optional Assign labels to the values of the array. Has to have the same shape as `input`. index : array_like, optional A single label number or a sequence of label numbers of the objects to be measured. 
Returns ------- sum : ndarray or scalar An array of the sums of values of `input` inside the regions defined by `labels` with the same shape as `index`. If 'index' is None or scalar, a scalar is returned. See Also -------- mean, median Examples -------- >>> from scipy import ndimage >>> input = [0,1,2,3] >>> labels = [1,1,2,2] >>> ndimage.sum_labels(input, labels, index=[1,2]) [1.0, 5.0] >>> ndimage.sum_labels(input, labels, index=1) 1 >>> ndimage.sum_labels(input, labels) 6 """ count, sum = _stats(input, labels, index) return sum def mean(input, labels=None, index=None): """ Calculate the mean of the values of an array at labels. Parameters ---------- input : array_like Array on which to compute the mean of elements over distinct regions. labels : array_like, optional Array of labels of same shape, or broadcastable to the same shape as `input`. All elements sharing the same label form one region over which the mean of the elements is computed. index : int or sequence of ints, optional Labels of the objects over which the mean is to be computed. Default is None, in which case the mean for all values where label is greater than 0 is calculated. Returns ------- out : list Sequence of same length as `index`, with the mean of the different regions labeled by the labels in `index`. See Also -------- variance, standard_deviation, minimum, maximum, sum, label Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.arange(25).reshape((5,5)) >>> labels = np.zeros_like(a) >>> labels[3:5,3:5] = 1 >>> index = np.unique(labels) >>> labels array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 1, 1]]) >>> index array([0, 1]) >>> ndimage.mean(a, labels=labels, index=index) [10.285714285714286, 21.0] """ count, sum = _stats(input, labels, index) return sum / numpy.asanyarray(count).astype(numpy.float64) def variance(input, labels=None, index=None): """ Calculate the variance of the values of an N-D image array, optionally at specified sub-regions. Parameters ---------- input : array_like Nd-image data to process. labels : array_like, optional Labels defining sub-regions in `input`. If not None, must be same shape as `input`. index : int or sequence of ints, optional `labels` to include in output. If None (default), all values where `labels` is non-zero are used. Returns ------- variance : float or ndarray Values of variance, for each sub-region if `labels` and `index` are specified. See Also -------- label, standard_deviation, maximum, minimum, extrema Examples -------- >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> from scipy import ndimage >>> ndimage.variance(a) 7.609375 Features to process can be specified using `labels` and `index`: >>> lbl, nlbl = ndimage.label(a) >>> ndimage.variance(a, lbl, index=np.arange(1, nlbl+1)) array([ 2.1875, 2.25 , 9. ]) If no index is given, all non-zero `labels` are processed: >>> ndimage.variance(a, lbl) 6.1875 """ count, sum, sum_c_sq = _stats(input, labels, index, centered=True) return sum_c_sq / np.asanyarray(count).astype(float) def standard_deviation(input, labels=None, index=None): """ Calculate the standard deviation of the values of an N-D image array, optionally at specified sub-regions. Parameters ---------- input : array_like N-D image data to process. labels : array_like, optional Labels to identify sub-regions in `input`. If not None, must be same shape as `input`. 
index : int or sequence of ints, optional `labels` to include in output. If None (default), all values where `labels` is non-zero are used. Returns ------- standard_deviation : float or ndarray Values of standard deviation, for each sub-region if `labels` and `index` are specified. See Also -------- label, variance, maximum, minimum, extrema Examples -------- >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> from scipy import ndimage >>> ndimage.standard_deviation(a) 2.7585095613392387 Features to process can be specified using `labels` and `index`: >>> lbl, nlbl = ndimage.label(a) >>> ndimage.standard_deviation(a, lbl, index=np.arange(1, nlbl+1)) array([ 1.479, 1.5 , 3. ]) If no index is given, non-zero `labels` are processed: >>> ndimage.standard_deviation(a, lbl) 2.4874685927665499 """ return numpy.sqrt(variance(input, labels, index)) def _select(input, labels=None, index=None, find_min=False, find_max=False, find_min_positions=False, find_max_positions=False, find_median=False): """Returns min, max, or both, plus their positions (if requested), and median.""" input = numpy.asanyarray(input) find_positions = find_min_positions or find_max_positions positions = None if find_positions: positions = numpy.arange(input.size).reshape(input.shape) def single_group(vals, positions): result = [] if find_min: result += [vals.min()] if find_min_positions: result += [positions[vals == vals.min()][0]] if find_max: result += [vals.max()] if find_max_positions: result += [positions[vals == vals.max()][0]] if find_median: result += [numpy.median(vals)] return result if labels is None: return single_group(input, positions) # ensure input and labels match sizes input, labels = numpy.broadcast_arrays(input, labels) if index is None: mask = (labels > 0) masked_positions = None if find_positions: masked_positions = positions[mask] return single_group(input[mask], masked_positions) if numpy.isscalar(index): mask = (labels == index) masked_positions = None if find_positions: masked_positions = positions[mask] return single_group(input[mask], masked_positions) # remap labels to unique integers if necessary, or if the largest # label is larger than the number of values. 
if (not _safely_castable_to_int(labels.dtype) or labels.min() < 0 or labels.max() > labels.size): # remap labels, and indexes unique_labels, labels = numpy.unique(labels, return_inverse=True) idxs = numpy.searchsorted(unique_labels, index) # make all of idxs valid idxs[idxs >= unique_labels.size] = 0 found = (unique_labels[idxs] == index) else: # labels are an integer type, and there aren't too many idxs = numpy.asanyarray(index, numpy.int_).copy() found = (idxs >= 0) & (idxs <= labels.max()) idxs[~ found] = labels.max() + 1 if find_median: order = numpy.lexsort((input.ravel(), labels.ravel())) else: order = input.ravel().argsort() input = input.ravel()[order] labels = labels.ravel()[order] if find_positions: positions = positions.ravel()[order] result = [] if find_min: mins = numpy.zeros(labels.max() + 2, input.dtype) mins[labels[::-1]] = input[::-1] result += [mins[idxs]] if find_min_positions: minpos = numpy.zeros(labels.max() + 2, int) minpos[labels[::-1]] = positions[::-1] result += [minpos[idxs]] if find_max: maxs = numpy.zeros(labels.max() + 2, input.dtype) maxs[labels] = input result += [maxs[idxs]] if find_max_positions: maxpos = numpy.zeros(labels.max() + 2, int) maxpos[labels] = positions result += [maxpos[idxs]] if find_median: locs = numpy.arange(len(labels)) lo = numpy.zeros(labels.max() + 2, numpy.int_) lo[labels[::-1]] = locs[::-1] hi = numpy.zeros(labels.max() + 2, numpy.int_) hi[labels] = locs lo = lo[idxs] hi = hi[idxs] # lo is an index to the lowest value in input for each label, # hi is an index to the largest value. # move them to be either the same ((hi - lo) % 2 == 0) or next # to each other ((hi - lo) % 2 == 1), then average. step = (hi - lo) // 2 lo += step hi -= step if (np.issubdtype(input.dtype, np.integer) or np.issubdtype(input.dtype, np.bool_)): # avoid integer overflow or boolean addition (gh-12836) result += [(input[lo].astype('d') + input[hi].astype('d')) / 2.0] else: result += [(input[lo] + input[hi]) / 2.0] return result def minimum(input, labels=None, index=None): """ Calculate the minimum of the values of an array over labeled regions. Parameters ---------- input : array_like Array_like of values. For each region specified by `labels`, the minimal values of `input` over the region is computed. labels : array_like, optional An array_like of integers marking different regions over which the minimum value of `input` is to be computed. `labels` must have the same shape as `input`. If `labels` is not specified, the minimum over the whole array is returned. index : array_like, optional A list of region labels that are taken into account for computing the minima. If index is None, the minimum over all elements where `labels` is non-zero is returned. Returns ------- minimum : float or list of floats List of minima of `input` over the regions determined by `labels` and whose index is in `index`. If `index` or `labels` are not specified, a float is returned: the minimal value of `input` if `labels` is None, and the minimal value of elements where `labels` is greater than zero if `index` is None. See Also -------- label, maximum, median, minimum_position, extrema, sum, mean, variance, standard_deviation Notes ----- The function returns a Python list and not a NumPy array, use `np.array` to convert the list to an array. Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... 
[9, 3, 0, 0]]) >>> labels, labels_nb = ndimage.label(a) >>> labels array([[1, 1, 0, 0], [1, 1, 0, 2], [0, 0, 0, 2], [3, 3, 0, 0]]) >>> ndimage.minimum(a, labels=labels, index=np.arange(1, labels_nb + 1)) [1.0, 4.0, 3.0] >>> ndimage.minimum(a) 0.0 >>> ndimage.minimum(a, labels=labels) 1.0 """ return _select(input, labels, index, find_min=True)[0] def maximum(input, labels=None, index=None): """ Calculate the maximum of the values of an array over labeled regions. Parameters ---------- input : array_like Array_like of values. For each region specified by `labels`, the maximal values of `input` over the region is computed. labels : array_like, optional An array of integers marking different regions over which the maximum value of `input` is to be computed. `labels` must have the same shape as `input`. If `labels` is not specified, the maximum over the whole array is returned. index : array_like, optional A list of region labels that are taken into account for computing the maxima. If index is None, the maximum over all elements where `labels` is non-zero is returned. Returns ------- output : float or list of floats List of maxima of `input` over the regions determined by `labels` and whose index is in `index`. If `index` or `labels` are not specified, a float is returned: the maximal value of `input` if `labels` is None, and the maximal value of elements where `labels` is greater than zero if `index` is None. See Also -------- label, minimum, median, maximum_position, extrema, sum, mean, variance, standard_deviation Notes ----- The function returns a Python list and not a NumPy array, use `np.array` to convert the list to an array. Examples -------- >>> import numpy as np >>> a = np.arange(16).reshape((4,4)) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> labels = np.zeros_like(a) >>> labels[:2,:2] = 1 >>> labels[2:, 1:3] = 2 >>> labels array([[1, 1, 0, 0], [1, 1, 0, 0], [0, 2, 2, 0], [0, 2, 2, 0]]) >>> from scipy import ndimage >>> ndimage.maximum(a) 15.0 >>> ndimage.maximum(a, labels=labels, index=[1,2]) [5.0, 14.0] >>> ndimage.maximum(a, labels=labels) 14.0 >>> b = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> labels, labels_nb = ndimage.label(b) >>> labels array([[1, 1, 0, 0], [1, 1, 0, 2], [0, 0, 0, 2], [3, 3, 0, 0]]) >>> ndimage.maximum(b, labels=labels, index=np.arange(1, labels_nb + 1)) [5.0, 7.0, 9.0] """ return _select(input, labels, index, find_max=True)[0] def median(input, labels=None, index=None): """ Calculate the median of the values of an array over labeled regions. Parameters ---------- input : array_like Array_like of values. For each region specified by `labels`, the median value of `input` over the region is computed. labels : array_like, optional An array_like of integers marking different regions over which the median value of `input` is to be computed. `labels` must have the same shape as `input`. If `labels` is not specified, the median over the whole array is returned. index : array_like, optional A list of region labels that are taken into account for computing the medians. If index is None, the median over all elements where `labels` is non-zero is returned. Returns ------- median : float or list of floats List of medians of `input` over the regions determined by `labels` and whose index is in `index`. 
If `index` or `labels` are not specified, a float is returned: the median value of `input` if `labels` is None, and the median value of elements where `labels` is greater than zero if `index` is None. See Also -------- label, minimum, maximum, extrema, sum, mean, variance, standard_deviation Notes ----- The function returns a Python list and not a NumPy array, use `np.array` to convert the list to an array. Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.array([[1, 2, 0, 1], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> labels, labels_nb = ndimage.label(a) >>> labels array([[1, 1, 0, 2], [1, 1, 0, 2], [0, 0, 0, 2], [3, 3, 0, 0]]) >>> ndimage.median(a, labels=labels, index=np.arange(1, labels_nb + 1)) [2.5, 4.0, 6.0] >>> ndimage.median(a) 1.0 >>> ndimage.median(a, labels=labels) 3.0 """ return _select(input, labels, index, find_median=True)[0] def minimum_position(input, labels=None, index=None): """ Find the positions of the minimums of the values of an array at labels. Parameters ---------- input : array_like Array_like of values. labels : array_like, optional An array of integers marking different regions over which the position of the minimum value of `input` is to be computed. `labels` must have the same shape as `input`. If `labels` is not specified, the location of the first minimum over the whole array is returned. The `labels` argument only works when `index` is specified. index : array_like, optional A list of region labels that are taken into account for finding the location of the minima. If `index` is None, the ``first`` minimum over all elements where `labels` is non-zero is returned. The `index` argument only works when `labels` is specified. Returns ------- output : list of tuples of ints Tuple of ints or list of tuples of ints that specify the location of minima of `input` over the regions determined by `labels` and whose index is in `index`. If `index` or `labels` are not specified, a tuple of ints is returned specifying the location of the first minimal value of `input`. See Also -------- label, minimum, median, maximum_position, extrema, sum, mean, variance, standard_deviation Examples -------- >>> import numpy as np >>> a = np.array([[10, 20, 30], ... [40, 80, 100], ... [1, 100, 200]]) >>> b = np.array([[1, 2, 0, 1], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> from scipy import ndimage >>> ndimage.minimum_position(a) (2, 0) >>> ndimage.minimum_position(b) (0, 2) Features to process can be specified using `labels` and `index`: >>> label, pos = ndimage.label(a) >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1)) [(2, 0)] >>> label, pos = ndimage.label(b) >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1)) [(0, 0), (0, 3), (3, 1)] """ dims = numpy.array(numpy.asarray(input).shape) # see numpy.unravel_index to understand this line. dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] result = _select(input, labels, index, find_min_positions=True)[0] if numpy.isscalar(result): return tuple((result // dim_prod) % dims) return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] def maximum_position(input, labels=None, index=None): """ Find the positions of the maximums of the values of an array at labels. For each region specified by `labels`, the position of the maximum value of `input` within the region is returned. Parameters ---------- input : array_like Array_like of values. 
labels : array_like, optional An array of integers marking different regions over which the position of the maximum value of `input` is to be computed. `labels` must have the same shape as `input`. If `labels` is not specified, the location of the first maximum over the whole array is returned. The `labels` argument only works when `index` is specified. index : array_like, optional A list of region labels that are taken into account for finding the location of the maxima. If `index` is None, the first maximum over all elements where `labels` is non-zero is returned. The `index` argument only works when `labels` is specified. Returns ------- output : list of tuples of ints List of tuples of ints that specify the location of maxima of `input` over the regions determined by `labels` and whose index is in `index`. If `index` or `labels` are not specified, a tuple of ints is returned specifying the location of the ``first`` maximal value of `input`. See Also -------- label, minimum, median, maximum_position, extrema, sum, mean, variance, standard_deviation Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> ndimage.maximum_position(a) (3, 0) Features to process can be specified using `labels` and `index`: >>> lbl = np.array([[0, 1, 2, 3], ... [0, 1, 2, 3], ... [0, 1, 2, 3], ... [0, 1, 2, 3]]) >>> ndimage.maximum_position(a, lbl, 1) (1, 1) If no index is given, non-zero `labels` are processed: >>> ndimage.maximum_position(a, lbl) (2, 3) If there are no maxima, the position of the first element is returned: >>> ndimage.maximum_position(a, lbl, 2) (0, 2) """ dims = numpy.array(numpy.asarray(input).shape) # see numpy.unravel_index to understand this line. dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] result = _select(input, labels, index, find_max_positions=True)[0] if numpy.isscalar(result): return tuple((result // dim_prod) % dims) return [tuple(v) for v in (result.reshape(-1, 1) // dim_prod) % dims] def extrema(input, labels=None, index=None): """ Calculate the minimums and maximums of the values of an array at labels, along with their positions. Parameters ---------- input : ndarray N-D image data to process. labels : ndarray, optional Labels of features in input. If not None, must be same shape as `input`. index : int or sequence of ints, optional Labels to include in output. If None (default), all values where non-zero `labels` are used. Returns ------- minimums, maximums : int or ndarray Values of minimums and maximums in each feature. min_positions, max_positions : tuple or list of tuples Each tuple gives the N-D coordinates of the corresponding minimum or maximum. See Also -------- maximum, minimum, maximum_position, minimum_position, center_of_mass Examples -------- >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> from scipy import ndimage >>> ndimage.extrema(a) (0, 9, (0, 2), (3, 0)) Features to process can be specified using `labels` and `index`: >>> lbl, nlbl = ndimage.label(a) >>> ndimage.extrema(a, lbl, index=np.arange(1, nlbl+1)) (array([1, 4, 3]), array([5, 7, 9]), [(0, 0), (1, 3), (3, 1)], [(1, 0), (2, 3), (3, 0)]) If no index is given, non-zero `labels` are processed: >>> ndimage.extrema(a, lbl) (1, 9, (0, 0), (3, 0)) """ dims = numpy.array(numpy.asarray(input).shape) # see numpy.unravel_index to understand this line. 
dim_prod = numpy.cumprod([1] + list(dims[:0:-1]))[::-1] minimums, min_positions, maximums, max_positions = _select(input, labels, index, find_min=True, find_max=True, find_min_positions=True, find_max_positions=True) if numpy.isscalar(minimums): return (minimums, maximums, tuple((min_positions // dim_prod) % dims), tuple((max_positions // dim_prod) % dims)) min_positions = [tuple(v) for v in (min_positions.reshape(-1, 1) // dim_prod) % dims] max_positions = [tuple(v) for v in (max_positions.reshape(-1, 1) // dim_prod) % dims] return minimums, maximums, min_positions, max_positions def center_of_mass(input, labels=None, index=None): """ Calculate the center of mass of the values of an array at labels. Parameters ---------- input : ndarray Data from which to calculate center-of-mass. The masses can either be positive or negative. labels : ndarray, optional Labels for objects in `input`, as generated by `ndimage.label`. Only used with `index`. Dimensions must be the same as `input`. index : int or sequence of ints, optional Labels for which to calculate centers-of-mass. If not specified, the combined center of mass of all labels greater than zero will be calculated. Only used with `labels`. Returns ------- center_of_mass : tuple, or list of tuples Coordinates of centers-of-mass. Examples -------- >>> import numpy as np >>> a = np.array(([0,0,0,0], ... [0,1,1,0], ... [0,1,1,0], ... [0,1,1,0])) >>> from scipy import ndimage >>> ndimage.center_of_mass(a) (2.0, 1.5) Calculation of multiple objects in an image >>> b = np.array(([0,1,1,0], ... [0,1,0,0], ... [0,0,0,0], ... [0,0,1,1], ... [0,0,1,1])) >>> lbl = ndimage.label(b)[0] >>> ndimage.center_of_mass(b, lbl, [1,2]) [(0.33333333333333331, 1.3333333333333333), (3.5, 2.5)] Negative masses are also accepted, which can occur for example when bias is removed from measured data due to random noise. >>> c = np.array(([-1,0,0,0], ... [0,-1,-1,0], ... [0,1,-1,0], ... [0,1,1,0])) >>> ndimage.center_of_mass(c) (-4.0, 1.0) If there are division by zero issues, the function does not raise an error but rather issues a RuntimeWarning before returning inf and/or NaN. >>> d = np.array([-1, 1]) >>> ndimage.center_of_mass(d) (inf,) """ normalizer = sum(input, labels, index) grids = numpy.ogrid[[slice(0, i) for i in input.shape]] results = [sum(input * grids[dir].astype(float), labels, index) / normalizer for dir in range(input.ndim)] if numpy.isscalar(results[0]): return tuple(results) return [tuple(v) for v in numpy.array(results).T] def histogram(input, min, max, bins, labels=None, index=None): """ Calculate the histogram of the values of an array, optionally at labels. Histogram calculates the frequency of values in an array within bins determined by `min`, `max`, and `bins`. The `labels` and `index` keywords can limit the scope of the histogram to specified sub-regions within the array. Parameters ---------- input : array_like Data for which to calculate histogram. min, max : int Minimum and maximum values of range of histogram bins. bins : int Number of bins. labels : array_like, optional Labels for objects in `input`. If not None, must be same shape as `input`. index : int or sequence of ints, optional Label or labels for which to calculate histogram. If None, all values where label is greater than zero are used Returns ------- hist : ndarray Histogram counts. Examples -------- >>> import numpy as np >>> a = np.array([[ 0. , 0.2146, 0.5962, 0. ], ... [ 0. , 0.7778, 0. , 0. ], ... [ 0. , 0. , 0. , 0. ], ... [ 0. , 0. , 0.7181, 0.2787], ... [ 0. , 0. 
, 0.6573, 0.3094]]) >>> from scipy import ndimage >>> ndimage.histogram(a, 0, 1, 10) array([13, 0, 2, 1, 0, 1, 1, 2, 0, 0]) With labels and no indices, non-zero elements are counted: >>> lbl, nlbl = ndimage.label(a) >>> ndimage.histogram(a, 0, 1, 10, lbl) array([0, 0, 2, 1, 0, 1, 1, 2, 0, 0]) Indices can be used to count only certain objects: >>> ndimage.histogram(a, 0, 1, 10, lbl, 2) array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0]) """ _bins = numpy.linspace(min, max, bins + 1) def _hist(vals): return numpy.histogram(vals, _bins)[0] return labeled_comprehension(input, labels, index, _hist, object, None, pass_positions=False) def watershed_ift(input, markers, structure=None, output=None): """ Apply watershed from markers using image foresting transform algorithm. Parameters ---------- input : array_like Input. markers : array_like Markers are points within each watershed that form the beginning of the process. Negative markers are considered background markers which are processed after the other markers. structure : structure element, optional A structuring element defining the connectivity of the object can be provided. If None, an element is generated with a squared connectivity equal to one. output : ndarray, optional An output array can optionally be provided. The same shape as input. Returns ------- watershed_ift : ndarray Output. Same shape as `input`. References ---------- .. [1] A.X. Falcao, J. Stolfi and R. de Alencar Lotufo, "The image foresting transform: theory, algorithms, and applications", Pattern Analysis and Machine Intelligence, vol. 26, pp. 19-29, 2004. """ input = numpy.asarray(input) if input.dtype.type not in [numpy.uint8, numpy.uint16]: raise TypeError('only 8 and 16 unsigned inputs are supported') if structure is None: structure = _morphology.generate_binary_structure(input.ndim, 1) structure = numpy.asarray(structure, dtype=bool) if structure.ndim != input.ndim: raise RuntimeError('structure and input must have equal rank') for ii in structure.shape: if ii != 3: raise RuntimeError('structure dimensions must be equal to 3') if not structure.flags.contiguous: structure = structure.copy() markers = numpy.asarray(markers) if input.shape != markers.shape: raise RuntimeError('input and markers must have equal shape') integral_types = [numpy.int8, numpy.int16, numpy.int32, numpy.int_, numpy.int64, numpy.intc, numpy.intp] if markers.dtype.type not in integral_types: raise RuntimeError('marker should be of integer type') if isinstance(output, numpy.ndarray): if output.dtype.type not in integral_types: raise RuntimeError('output should be of integer type') else: output = markers.dtype output = _ni_support._get_output(output, input) _nd_image.watershed_ift(input, markers, structure, output) return output
56013
32.441194
124
py
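# --- Illustrative usage sketch (assumed standalone example, not part of the dataset record above) ---
# A minimal sketch of the two grouping helpers documented in _measurements.py:
# value_indices() returns an index tuple per distinct value, and
# labeled_comprehension() applies an arbitrary reduction per label. The array
# `a` mirrors the docstring examples; `per_label_range` is a hypothetical
# helper defined here only for illustration.
import numpy as np
from scipy import ndimage

a = np.array([[1, 2, 0, 0],
              [5, 3, 0, 4],
              [0, 0, 0, 7],
              [9, 3, 0, 0]])
lbl, nlbl = ndimage.label(a)

# Indices of every element carrying each distinct label (label 0 ignored).
idx_per_label = ndimage.value_indices(lbl, ignore_value=0)

# Apply a custom reduction (max - min) to each labeled region.
def per_label_range(vals):
    return vals.max() - vals.min()

ranges = ndimage.labeled_comprehension(a, lbl, np.arange(1, nlbl + 1),
                                       per_label_range, float, 0)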
scipy
scipy-main/scipy/ndimage/_interpolation.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import itertools import warnings import numpy from scipy._lib._util import normalize_axis_index from scipy import special from . import _ni_support from . import _nd_image from ._ni_docstrings import docfiller __all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] @docfiller def spline_filter1d(input, order=3, axis=-1, output=numpy.float64, mode='mirror'): """ Calculate a 1-D spline filter along the given axis. The lines of the array along the given axis are filtered by a spline filter. The order of the spline must be >= 2 and <= 5. Parameters ---------- %(input)s order : int, optional The order of the spline, default is 3. axis : int, optional The axis along which the spline filter is applied. Default is the last axis. output : ndarray or dtype, optional The array in which to place the output, or the dtype of the returned array. Default is ``numpy.float64``. %(mode_interp_mirror)s Returns ------- spline_filter1d : ndarray The filtered input. See Also -------- spline_filter : Multidimensional spline filter. Notes ----- All of the interpolation functions in `ndimage` do spline interpolation of the input image. If using B-splines of `order > 1`, the input image values have to be converted to B-spline coefficients first, which is done by applying this 1-D filter sequentially along all axes of the input. All functions that require B-spline coefficients will automatically filter their inputs, a behavior controllable with the `prefilter` keyword argument. For functions that accept a `mode` parameter, the result will only be correct if it matches the `mode` used when filtering. For complex-valued `input`, this function processes the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. 
Examples -------- We can filter an image using 1-D spline along the given axis: >>> from scipy.ndimage import spline_filter1d >>> import numpy as np >>> import matplotlib.pyplot as plt >>> orig_img = np.eye(20) # create an image >>> orig_img[10, :] = 1.0 >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0) >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1) >>> f, ax = plt.subplots(1, 3, sharex=True) >>> for ind, data in enumerate([[orig_img, "original image"], ... [sp_filter_axis_0, "spline filter (axis=0)"], ... [sp_filter_axis_1, "spline filter (axis=1)"]]): ... ax[ind].imshow(data[0], cmap='gray_r') ... ax[ind].set_title(data[1]) >>> plt.tight_layout() >>> plt.show() """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, complex_output=complex_output) if complex_output: spline_filter1d(input.real, order, axis, output.real, mode) spline_filter1d(input.imag, order, axis, output.imag, mode) return output if order in [0, 1]: output[...] = numpy.array(input) else: mode = _ni_support._extend_mode_to_code(mode) axis = normalize_axis_index(axis, input.ndim) _nd_image.spline_filter1d(input, order, axis, output, mode) return output @docfiller def spline_filter(input, order=3, output=numpy.float64, mode='mirror'): """ Multidimensional spline filter. Parameters ---------- %(input)s order : int, optional The order of the spline, default is 3. axis : int, optional The axis along which the spline filter is applied. Default is the last axis. output : ndarray or dtype, optional The array in which to place the output, or the dtype of the returned array. Default is ``numpy.float64``. %(mode_interp_mirror)s Returns ------- spline_filter : ndarray Filtered array. Has the same shape as `input`. See Also -------- spline_filter1d : Calculate a 1-D spline filter along the given axis. Notes ----- The multidimensional filter is implemented as a sequence of 1-D spline filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. For complex-valued `input`, this function processes the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- We can filter an image using multidimentional splines: >>> from scipy.ndimage import spline_filter >>> import numpy as np >>> import matplotlib.pyplot as plt >>> orig_img = np.eye(20) # create an image >>> orig_img[10, :] = 1.0 >>> sp_filter = spline_filter(orig_img, order=3) >>> f, ax = plt.subplots(1, 2, sharex=True) >>> for ind, data in enumerate([[orig_img, "original image"], ... [sp_filter, "spline filter"]]): ... ax[ind].imshow(data[0], cmap='gray_r') ... ax[ind].set_title(data[1]) >>> plt.tight_layout() >>> plt.show() """ if order < 2 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, complex_output=complex_output) if complex_output: spline_filter(input.real, order, output.real, mode) spline_filter(input.imag, order, output.imag, mode) return output if order not in [0, 1] and input.ndim > 0: for axis in range(input.ndim): spline_filter1d(input, order, axis, output=output, mode=mode) input = output else: output[...] = input[...] 
return output def _prepad_for_spline_filter(input, mode, cval): if mode in ['nearest', 'grid-constant']: npad = 12 if mode == 'grid-constant': padded = numpy.pad(input, npad, mode='constant', constant_values=cval) elif mode == 'nearest': padded = numpy.pad(input, npad, mode='edge') else: # other modes have exact boundary conditions implemented so # no prepadding is needed npad = 0 padded = input return padded, npad @docfiller def geometric_transform(input, mapping, output_shape=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True, extra_arguments=(), extra_keywords={}): """ Apply an arbitrary geometric transform. The given mapping function is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. Parameters ---------- %(input)s mapping : {callable, scipy.LowLevelCallable} A callable object that accepts a tuple of length equal to the output array rank, and returns the corresponding input coordinates as a tuple of length equal to the input array rank. output_shape : tuple of ints, optional Shape tuple. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s extra_arguments : tuple, optional Extra arguments passed to `mapping`. extra_keywords : dict, optional Extra keywords passed to `mapping`. Returns ------- output : ndarray The filtered input. See Also -------- map_coordinates, affine_transform, spline_filter1d Notes ----- This function also accepts low-level callback functions with one the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int mapping(npy_intp *output_coordinates, double *input_coordinates, int output_rank, int input_rank, void *user_data) int mapping(intptr_t *output_coordinates, double *input_coordinates, int output_rank, int input_rank, void *user_data) The calling function iterates over the elements of the output array, calling the callback function at each element. The coordinates of the current output element are passed through ``output_coordinates``. The callback function must return the coordinates at which the input must be interpolated in ``input_coordinates``. The rank of the input and output arrays are given by ``input_rank`` and ``output_rank`` respectively. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the Python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. For complex-valued `input`, this function transforms the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- >>> import numpy as np >>> from scipy.ndimage import geometric_transform >>> a = np.arange(12.).reshape((4, 3)) >>> def shift_func(output_coords): ... return (output_coords[0] - 0.5, output_coords[1] - 0.5) ... >>> geometric_transform(a, shift_func) array([[ 0. , 0. , 0. ], [ 0. , 1.362, 2.738], [ 0. , 4.812, 6.187], [ 0. , 8.263, 9.637]]) >>> b = [1, 2, 3, 4, 5] >>> def shift_func(output_coords): ... 
return (output_coords[0] - 3,) ... >>> geometric_transform(b, shift_func, mode='constant') array([0, 0, 0, 1, 2]) >>> geometric_transform(b, shift_func, mode='nearest') array([1, 1, 1, 1, 2]) >>> geometric_transform(b, shift_func, mode='reflect') array([3, 2, 1, 1, 2]) >>> geometric_transform(b, shift_func, mode='wrap') array([2, 3, 4, 1, 2]) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if output_shape is None: output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: kwargs = dict(order=order, mode=mode, prefilter=prefilter, output_shape=output_shape, extra_arguments=extra_arguments, extra_keywords=extra_keywords) geometric_transform(input.real, mapping, output=output.real, cval=numpy.real(cval), **kwargs) geometric_transform(input.imag, mapping, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) _nd_image.geometric_transform(filtered, mapping, None, None, None, output, order, mode, cval, npad, extra_arguments, extra_keywords) return output @docfiller def map_coordinates(input, coordinates, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Map the input array to new coordinates by interpolation. The array of coordinates is used to find, for each point in the output, the corresponding coordinates in the input. The value of the input at those coordinates is determined by spline interpolation of the requested order. The shape of the output is derived from that of the coordinate array by dropping the first axis. The values of the array along the first axis are the coordinates in the input array at which the output value is found. Parameters ---------- %(input)s coordinates : array_like The coordinates at which `input` is evaluated. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- map_coordinates : ndarray The result of transforming the input. The shape of the output is derived from that of `coordinates` by dropping the first axis. See Also -------- spline_filter, geometric_transform, scipy.interpolate Notes ----- For complex-valued `input`, this function maps the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.arange(12.).reshape((4, 3)) >>> a array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., 8.], [ 9., 10., 11.]]) >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1) array([ 2., 7.]) Above, the interpolated value of a[0.5, 0.5] gives output[0], while a[2, 1] is output[1]. >>> inds = np.array([[0.5, 2], [0.5, 4]]) >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3) array([ 2. 
, -33.3]) >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest') array([ 2., 8.]) >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool) array([ True, False], dtype=bool) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) coordinates = numpy.asarray(coordinates) if numpy.iscomplexobj(coordinates): raise TypeError('Complex type not supported') output_shape = coordinates.shape[1:] if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') if coordinates.shape[0] != input.ndim: raise RuntimeError('invalid shape for coordinate array') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: kwargs = dict(order=order, mode=mode, prefilter=prefilter) map_coordinates(input.real, coordinates, output=output.real, cval=numpy.real(cval), **kwargs) map_coordinates(input.imag, coordinates, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) _nd_image.geometric_transform(filtered, None, coordinates, None, None, output, order, mode, cval, npad, None, None) return output @docfiller def affine_transform(input, matrix, offset=0.0, output_shape=None, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Apply an affine transformation. Given an output image pixel index vector ``o``, the pixel value is determined from the input image at position ``np.dot(matrix, o) + offset``. This does 'pull' (or 'backward') resampling, transforming the output space to the input to locate data. Affine transformations are often described in the 'push' (or 'forward') direction, transforming input to output. If you have a matrix for the 'push' transformation, use its inverse (:func:`numpy.linalg.inv`) in this function. Parameters ---------- %(input)s matrix : ndarray The inverse coordinate transformation matrix, mapping output coordinates to input coordinates. If ``ndim`` is the number of dimensions of ``input``, the given matrix must have one of the following shapes: - ``(ndim, ndim)``: the linear transformation matrix for each output coordinate. - ``(ndim,)``: assume that the 2-D transformation matrix is diagonal, with the diagonal specified by the given value. A more efficient algorithm is then used that exploits the separability of the problem. - ``(ndim + 1, ndim + 1)``: assume that the transformation is specified using homogeneous coordinates [1]_. In this case, any value passed to ``offset`` is ignored. - ``(ndim, ndim + 1)``: as above, but the bottom row of a homogeneous transformation matrix is always ``[0, 0, ..., 1]``, and may be omitted. offset : float or sequence, optional The offset into the array where the transform is applied. If a float, `offset` is the same for each axis. If a sequence, `offset` should contain one value for each axis. output_shape : tuple of ints, optional Shape tuple. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- affine_transform : ndarray The transformed input. 
Notes ----- The given matrix and offset are used to find for each point in the output the corresponding coordinates in the input by an affine transformation. The value of the input at those coordinates is determined by spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. .. versionchanged:: 0.18.0 Previously, the exact interpretation of the affine transformation depended on whether the matrix was supplied as a 1-D or a 2-D array. If a 1-D array was supplied to the matrix parameter, the output pixel value at index ``o`` was determined from the input image at position ``matrix * (o + offset)``. For complex-valued `input`, this function transforms the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. References ---------- .. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if output_shape is None: if isinstance(output, numpy.ndarray): output_shape = output.shape else: output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError('input and output rank must be > 0') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: kwargs = dict(offset=offset, output_shape=output_shape, order=order, mode=mode, prefilter=prefilter) affine_transform(input.real, matrix, output=output.real, cval=numpy.real(cval), **kwargs) affine_transform(input.imag, matrix, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) matrix = numpy.asarray(matrix, dtype=numpy.float64) if matrix.ndim not in [1, 2] or matrix.shape[0] < 1: raise RuntimeError('no proper affine matrix provided') if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and (matrix.shape[0] in [input.ndim, input.ndim + 1])): if matrix.shape[0] == input.ndim + 1: exptd = [0] * input.ndim + [1] if not numpy.all(matrix[input.ndim] == exptd): msg = ('Expected homogeneous transformation matrix with ' 'shape {} for image shape {}, but bottom row was ' 'not equal to {}'.format(matrix.shape, input.shape, exptd)) raise ValueError(msg) # assume input is homogeneous coordinate transformation matrix offset = matrix[:input.ndim, input.ndim] matrix = matrix[:input.ndim, :input.ndim] if matrix.shape[0] != input.ndim: raise RuntimeError('affine matrix has wrong number of rows') if matrix.ndim == 2 and matrix.shape[1] != output.ndim: raise RuntimeError('affine matrix has wrong number of columns') if not matrix.flags.contiguous: matrix = matrix.copy() offset = _ni_support._normalize_sequence(offset, input.ndim) offset = numpy.asarray(offset, dtype=numpy.float64) if offset.ndim != 1 or offset.shape[0] < 1: raise RuntimeError('no proper offset provided') if not offset.flags.contiguous: offset = offset.copy() if matrix.ndim == 1: warnings.warn( "The behavior of affine_transform with a 1-D " "array supplied for the matrix parameter has changed in " "SciPy 0.18.0." 
) _nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order, mode, cval, npad, False) else: _nd_image.geometric_transform(filtered, None, None, matrix, offset, output, order, mode, cval, npad, None, None) return output @docfiller def shift(input, shift, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Shift an array. The array is shifted using spline interpolation of the requested order. Points outside the boundaries of the input are filled according to the given mode. Parameters ---------- %(input)s shift : float or sequence The shift along the axes. If a float, `shift` is the same for each axis. If a sequence, `shift` should contain one value for each axis. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- shift : ndarray The shifted input. Notes ----- For complex-valued `input`, this function shifts the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if input.ndim < 1: raise RuntimeError('input and output rank must be > 0') complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, complex_output=complex_output) if complex_output: # import under different name to avoid confusion with shift parameter from scipy.ndimage._interpolation import shift as _shift kwargs = dict(order=order, mode=mode, prefilter=prefilter) _shift(input.real, shift, output=output.real, cval=numpy.real(cval), **kwargs) _shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input mode = _ni_support._extend_mode_to_code(mode) shift = _ni_support._normalize_sequence(shift, input.ndim) shift = [-ii for ii in shift] shift = numpy.asarray(shift, dtype=numpy.float64) if not shift.flags.contiguous: shift = shift.copy() _nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval, npad, False) return output @docfiller def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0, prefilter=True, *, grid_mode=False): """ Zoom an array. The array is zoomed using spline interpolation of the requested order. Parameters ---------- %(input)s zoom : float or sequence The zoom factor along the axes. If a float, `zoom` is the same for each axis. If a sequence, `zoom` should contain one value for each axis. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s grid_mode : bool, optional If False, the distance from the pixel centers is zoomed. Otherwise, the distance including the full pixel extent is used. For example, a 1d signal of length 5 is considered to have length 4 when `grid_mode` is False, but length 5 when `grid_mode` is True. See the following visual illustration: .. code-block:: text | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 | |<-------------------------------------->| vs. |<----------------------------------------------->| The starting point of the arrow in the diagram above corresponds to coordinate location 0 in each mode. Returns ------- zoom : ndarray The zoomed input. 
Notes ----- For complex-valued `input`, this function zooms the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.zoom(ascent, 3.0) >>> ax1.imshow(ascent, vmin=0, vmax=255) >>> ax2.imshow(result, vmin=0, vmax=255) >>> plt.show() >>> print(ascent.shape) (512, 512) >>> print(result.shape) (1536, 1536) """ if order < 0 or order > 5: raise RuntimeError('spline order not supported') input = numpy.asarray(input) if input.ndim < 1: raise RuntimeError('input and output rank must be > 0') zoom = _ni_support._normalize_sequence(zoom, input.ndim) output_shape = tuple( [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) complex_output = numpy.iscomplexobj(input) output = _ni_support._get_output(output, input, shape=output_shape, complex_output=complex_output) if complex_output: # import under different name to avoid confusion with zoom parameter from scipy.ndimage._interpolation import zoom as _zoom kwargs = dict(order=order, mode=mode, prefilter=prefilter) _zoom(input.real, zoom, output=output.real, cval=numpy.real(cval), **kwargs) _zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval), **kwargs) return output if prefilter and order > 1: padded, npad = _prepad_for_spline_filter(input, mode, cval) filtered = spline_filter(padded, order, output=numpy.float64, mode=mode) else: npad = 0 filtered = input if grid_mode: # warn about modes that may have surprising behavior suggest_mode = None if mode == 'constant': suggest_mode = 'grid-constant' elif mode == 'wrap': suggest_mode = 'grid-wrap' if suggest_mode is not None: warnings.warn( ("It is recommended to use mode = {} instead of {} when " "grid_mode is True.").format(suggest_mode, mode) ) mode = _ni_support._extend_mode_to_code(mode) zoom_div = numpy.array(output_shape) zoom_nominator = numpy.array(input.shape) if not grid_mode: zoom_div -= 1 zoom_nominator -= 1 # Zooming to infinite values is unpredictable, so just choose # zoom factor 1 instead zoom = numpy.divide(zoom_nominator, zoom_div, out=numpy.ones_like(input.shape, dtype=numpy.float64), where=zoom_div != 0) zoom = numpy.ascontiguousarray(zoom) _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad, grid_mode) return output @docfiller def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3, mode='constant', cval=0.0, prefilter=True): """ Rotate an array. The array is rotated in the plane defined by the two axes given by the `axes` parameter using spline interpolation of the requested order. Parameters ---------- %(input)s angle : float The rotation angle in degrees. axes : tuple of 2 ints, optional The two axes that define the plane of rotation. Default is the first two axes. reshape : bool, optional If `reshape` is true, the output shape is adapted so that the input array is contained completely in the output. Default is True. %(output)s order : int, optional The order of the spline interpolation, default is 3. The order has to be in the range 0-5. %(mode_interp_constant)s %(cval)s %(prefilter)s Returns ------- rotate : ndarray The rotated input. Notes ----- For complex-valued `input`, this function rotates the real and imaginary components independently. .. versionadded:: 1.6.0 Complex-valued support added. 
Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(10, 3)) >>> ax1, ax2, ax3 = fig.subplots(1, 3) >>> img = datasets.ascent() >>> img_45 = ndimage.rotate(img, 45, reshape=False) >>> full_img_45 = ndimage.rotate(img, 45, reshape=True) >>> ax1.imshow(img, cmap='gray') >>> ax1.set_axis_off() >>> ax2.imshow(img_45, cmap='gray') >>> ax2.set_axis_off() >>> ax3.imshow(full_img_45, cmap='gray') >>> ax3.set_axis_off() >>> fig.set_layout_engine('tight') >>> plt.show() >>> print(img.shape) (512, 512) >>> print(img_45.shape) (512, 512) >>> print(full_img_45.shape) (724, 724) """ input_arr = numpy.asarray(input) ndim = input_arr.ndim if ndim < 2: raise ValueError('input array should be at least 2D') axes = list(axes) if len(axes) != 2: raise ValueError('axes should contain exactly two values') if not all([float(ax).is_integer() for ax in axes]): raise ValueError('axes should contain only integer values') if axes[0] < 0: axes[0] += ndim if axes[1] < 0: axes[1] += ndim if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim: raise ValueError('invalid rotation plane specified') axes.sort() c, s = special.cosdg(angle), special.sindg(angle) rot_matrix = numpy.array([[c, s], [-s, c]]) img_shape = numpy.asarray(input_arr.shape) in_plane_shape = img_shape[axes] if reshape: # Compute transformed input bounds iy, ix = in_plane_shape out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]] # Compute the shape of the transformed input plane out_plane_shape = (numpy.ptp(out_bounds, axis=1) + 0.5).astype(int) else: out_plane_shape = img_shape[axes] out_center = rot_matrix @ ((out_plane_shape - 1) / 2) in_center = (in_plane_shape - 1) / 2 offset = in_center - out_center output_shape = img_shape output_shape[axes] = out_plane_shape output_shape = tuple(output_shape) complex_output = numpy.iscomplexobj(input_arr) output = _ni_support._get_output(output, input_arr, shape=output_shape, complex_output=complex_output) if ndim <= 2: affine_transform(input_arr, rot_matrix, offset, output_shape, output, order, mode, cval, prefilter) else: # If ndim > 2, the rotation is applied over all the planes # parallel to axes planes_coord = itertools.product( *[[slice(None)] if ax in axes else range(img_shape[ax]) for ax in range(ndim)]) out_plane_shape = tuple(out_plane_shape) for coordinates in planes_coord: ia = input_arr[coordinates] oa = output[coordinates] affine_transform(ia, rot_matrix, offset, out_plane_shape, oa, order, mode, cval, prefilter) return output
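A minimal usage sketch (an editor's addition, not part of the scipy source record above): it exercises the 'pull' resampling convention that the `affine_transform` docstring describes. A forward translation done with `ndimage.shift` is reproduced by `affine_transform` with the identity matrix and the negated offset, and once more with the equivalent homogeneous matrix. The test array and the (1, 2) translation are arbitrary choices for illustration.

import numpy as np
from scipy import ndimage

a = np.arange(25, dtype=float).reshape(5, 5)

# 'push' translation by (1, 2): output[i, j] = input[i - 1, j - 2]
pushed = ndimage.shift(a, (1, 2), order=1, mode='constant')

# same result via the 'pull' convention: identity matrix, negated offset
pulled = ndimage.affine_transform(a, np.eye(2), offset=(-1, -2),
                                  order=1, mode='constant')

# same result again with a homogeneous (ndim + 1, ndim + 1) matrix; the
# offset is then read from the last column and the bottom row must be [0, 0, 1]
H = np.array([[1., 0., -1.],
              [0., 1., -2.],
              [0., 0.,  1.]])
homog = ndimage.affine_transform(a, H, order=1, mode='constant')

print(np.allclose(pushed, pulled), np.allclose(pulled, homog))  # True True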
35940
35.787103
82
py
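Another small sketch (an editor's illustration, not taken from the record above) showing the `grid_mode` distinction diagrammed in the `zoom` docstring: with `grid_mode=False` the zoom factor relates pixel centers, so an input of length n is resampled with spacing (n - 1)/(m - 1); with `grid_mode=True` the full pixel extent is used, giving interior spacing n/m. The ramp input and the factor 2 are arbitrary, chosen so the sample spacing is directly visible in the output differences.

import numpy as np
from scipy import ndimage

x = np.arange(5, dtype=float)            # ramp: value equals coordinate

centers = ndimage.zoom(x, 2, order=1)    # grid_mode=False (default)
edges = ndimage.zoom(x, 2, order=1, grid_mode=True, mode='nearest')

# center-based zoom: 10 samples spaced (5 - 1) / (10 - 1) = 4/9 apart,
# so the endpoints of the input map onto the endpoints of the output
print(np.diff(centers))                  # ~0.444 everywhere

# grid-based zoom: interior samples spaced 5 / 10 = 0.5 apart; the first and
# last steps differ because 'nearest' clamps the half-pixel border
print(np.diff(edges))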
scipy
scipy-main/scipy/ndimage/_morphology.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import warnings import operator import numpy from . import _ni_support from . import _nd_image from . import _filters __all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing', 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', 'morphological_gradient', 'morphological_laplace', 'white_tophat', 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', 'distance_transform_edt'] def _center_is_true(structure, origin): structure = numpy.array(structure) coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, origin)]) return bool(structure[coor]) def iterate_structure(structure, iterations, origin=None): """ Iterate a structure by dilating it with itself. Parameters ---------- structure : array_like Structuring element (an array of bools, for example), to be dilated with itself. iterations : int number of dilations performed on the structure with itself origin : optional If origin is None, only the iterated structure is returned. If not, a tuple of the iterated structure and the modified origin is returned. Returns ------- iterate_structure : ndarray of bools A new structuring element obtained by dilating `structure` (`iterations` - 1) times with itself. 
See Also -------- generate_binary_structure Examples -------- >>> from scipy import ndimage >>> struct = ndimage.generate_binary_structure(2, 1) >>> struct.astype(int) array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) >>> ndimage.iterate_structure(struct, 2).astype(int) array([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]]) >>> ndimage.iterate_structure(struct, 3).astype(int) array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]]) """ structure = numpy.asarray(structure) if iterations < 2: return structure.copy() ni = iterations - 1 shape = [ii + ni * (ii - 1) for ii in structure.shape] pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] slc = tuple(slice(pos[ii], pos[ii] + structure.shape[ii], None) for ii in range(len(shape))) out = numpy.zeros(shape, bool) out[slc] = structure != 0 out = binary_dilation(out, structure, iterations=ni) if origin is None: return out else: origin = _ni_support._normalize_sequence(origin, structure.ndim) origin = [iterations * o for o in origin] return out, origin def generate_binary_structure(rank, connectivity): """ Generate a binary structure for binary morphological operations. Parameters ---------- rank : int Number of dimensions of the array to which the structuring element will be applied, as returned by `np.ndim`. connectivity : int `connectivity` determines which elements of the output array belong to the structure, i.e., are considered as neighbors of the central element. Elements up to a squared distance of `connectivity` from the center are considered neighbors. `connectivity` may range from 1 (no diagonal elements are neighbors) to `rank` (all elements are neighbors). Returns ------- output : ndarray of bools Structuring element which may be used for binary morphological operations, with `rank` dimensions and all dimensions equal to 3. See Also -------- iterate_structure, binary_dilation, binary_erosion Notes ----- `generate_binary_structure` can only create structuring elements with dimensions equal to 3, i.e., minimal dimensions. For larger structuring elements, that are useful e.g., for eroding large objects, one may either use `iterate_structure`, or create directly custom arrays with numpy functions such as `numpy.ones`. 
Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> struct = ndimage.generate_binary_structure(2, 1) >>> struct array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> a = np.zeros((5,5)) >>> a[2, 2] = 1 >>> a array([[ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype) >>> b array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype) array([[ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 1., 1., 1., 1., 1.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.]]) >>> struct = ndimage.generate_binary_structure(2, 2) >>> struct array([[ True, True, True], [ True, True, True], [ True, True, True]], dtype=bool) >>> struct = ndimage.generate_binary_structure(3, 1) >>> struct # no diagonal elements array([[[False, False, False], [False, True, False], [False, False, False]], [[False, True, False], [ True, True, True], [False, True, False]], [[False, False, False], [False, True, False], [False, False, False]]], dtype=bool) """ if connectivity < 1: connectivity = 1 if rank < 1: return numpy.array(True, dtype=bool) output = numpy.fabs(numpy.indices([3] * rank) - 1) output = numpy.add.reduce(output, 0) return output <= connectivity def _binary_erosion(input, structure, iterations, mask, output, border_value, origin, invert, brute_force): try: iterations = operator.index(iterations) except TypeError as e: raise TypeError('iterations parameter should be an integer') from e input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') if structure is None: structure = generate_binary_structure(input.ndim, 1) else: structure = numpy.asarray(structure, dtype=bool) if structure.ndim != input.ndim: raise RuntimeError('structure and input must have same dimensionality') if not structure.flags.contiguous: structure = structure.copy() if numpy.prod(structure.shape, axis=0) < 1: raise RuntimeError('structure must not be empty') if mask is not None: mask = numpy.asarray(mask) if mask.shape != input.shape: raise RuntimeError('mask and input must have equal sizes') origin = _ni_support._normalize_sequence(origin, input.ndim) cit = _center_is_true(structure, origin) if isinstance(output, numpy.ndarray): if numpy.iscomplexobj(output): raise TypeError('Complex output type not supported') else: output = bool output = _ni_support._get_output(output, input) temp_needed = numpy.may_share_memory(input, output) if temp_needed: # input and output arrays cannot share memory temp = output output = _ni_support._get_output(output.dtype, input) if iterations == 1: _nd_image.binary_erosion(input, structure, mask, output, border_value, origin, invert, cit, 0) elif cit and not brute_force: changed, coordinate_list = _nd_image.binary_erosion( input, structure, mask, output, border_value, origin, invert, cit, 1) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] for ii in range(len(origin)): origin[ii] = -origin[ii] if not structure.shape[ii] & 1: origin[ii] -= 1 if mask is not None: mask = numpy.asarray(mask, dtype=numpy.int8) if not structure.flags.contiguous: structure = structure.copy() _nd_image.binary_erosion2(output, structure, mask, iterations - 1, origin, invert, coordinate_list) else: tmp_in = numpy.empty_like(input, dtype=bool) tmp_out = output if iterations 
>= 1 and not iterations & 1: tmp_in, tmp_out = tmp_out, tmp_in changed = _nd_image.binary_erosion( input, structure, mask, tmp_out, border_value, origin, invert, cit, 0) ii = 1 while ii < iterations or (iterations < 1 and changed): tmp_in, tmp_out = tmp_out, tmp_in changed = _nd_image.binary_erosion( tmp_in, structure, mask, tmp_out, border_value, origin, invert, cit, 0) ii += 1 if temp_needed: temp[...] = output output = temp return output def binary_erosion(input, structure=None, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False): """ Multidimensional binary erosion with a given structuring element. Binary erosion is a mathematical morphology operation used for image processing. Parameters ---------- input : array_like Binary image to be eroded. Non-zero (True) elements form the subset to be eroded. structure : array_like, optional Structuring element used for the erosion. Non-zero elements are considered True. If no structuring element is provided, an element is generated with a square connectivity equal to one. iterations : int, optional The erosion is repeated `iterations` times (one, by default). If iterations is less than 1, the erosion is repeated until the result does not change anymore. mask : array_like, optional If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. border_value : int (cast to 0 or 1), optional Value at the border in the output array. origin : int or tuple of ints, optional Placement of the filter, by default 0. brute_force : boolean, optional Memory condition: if False, only the pixels whose value was changed in the last iteration are tracked as candidates to be updated (eroded) in the current iteration; if True all pixels are considered as candidates for erosion, regardless of what happened in the previous iteration. False by default. Returns ------- binary_erosion : ndarray of bools Erosion of the input by the structuring element. See Also -------- grey_erosion, binary_dilation, binary_closing, binary_opening, generate_binary_structure Notes ----- Erosion [1]_ is a mathematical morphology operation [2]_ that uses a structuring element for shrinking the shapes in an image. The binary erosion of an image by a structuring element is the locus of the points where a superimposition of the structuring element centered on the point is entirely contained in the set of non-zero elements of the image. References ---------- .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((7,7), dtype=int) >>> a[1:6, 2:5] = 1 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_erosion(a).astype(a.dtype) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> #Erosion removes objects smaller than the structure >>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) """ return _binary_erosion(input, structure, iterations, mask, output, border_value, origin, 0, brute_force) def binary_dilation(input, structure=None, iterations=1, mask=None, output=None, border_value=0, origin=0, brute_force=False): """ Multidimensional binary dilation with the given structuring element. Parameters ---------- input : array_like Binary array_like to be dilated. Non-zero (True) elements form the subset to be dilated. structure : array_like, optional Structuring element used for the dilation. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one. iterations : int, optional The dilation is repeated `iterations` times (one, by default). If iterations is less than 1, the dilation is repeated until the result does not change anymore. Only an integer of iterations is accepted. mask : array_like, optional If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. border_value : int (cast to 0 or 1), optional Value at the border in the output array. origin : int or tuple of ints, optional Placement of the filter, by default 0. brute_force : boolean, optional Memory condition: if False, only the pixels whose value was changed in the last iteration are tracked as candidates to be updated (dilated) in the current iteration; if True all pixels are considered as candidates for dilation, regardless of what happened in the previous iteration. False by default. Returns ------- binary_dilation : ndarray of bools Dilation of the input by the structuring element. See Also -------- grey_dilation, binary_erosion, binary_closing, binary_opening, generate_binary_structure Notes ----- Dilation [1]_ is a mathematical morphology operation [2]_ that uses a structuring element for expanding the shapes in an image. The binary dilation of an image by a structuring element is the locus of the points covered by the structuring element, when its center lies within the non-zero points of the image. References ---------- .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((5, 5)) >>> a[2, 2] = 1 >>> a array([[ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(a) array([[False, False, False, False, False], [False, False, True, False, False], [False, True, True, True, False], [False, False, True, False, False], [False, False, False, False, False]], dtype=bool) >>> ndimage.binary_dilation(a).astype(a.dtype) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> # 3x3 structuring element with connectivity 1, used by default >>> struct1 = ndimage.generate_binary_structure(2, 1) >>> struct1 array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> # 3x3 structuring element with connectivity 2 >>> struct2 = ndimage.generate_binary_structure(2, 2) >>> struct2 array([[ True, True, True], [ True, True, True], [ True, True, True]], dtype=bool) >>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype) array([[ 0., 0., 0., 0., 0.], [ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype) array([[ 0., 0., 0., 0., 0.], [ 0., 1., 1., 1., 0.], [ 0., 1., 1., 1., 0.], [ 0., 1., 1., 1., 0.], [ 0., 0., 0., 0., 0.]]) >>> ndimage.binary_dilation(a, structure=struct1,\\ ... iterations=2).astype(a.dtype) array([[ 0., 0., 1., 0., 0.], [ 0., 1., 1., 1., 0.], [ 1., 1., 1., 1., 1.], [ 0., 1., 1., 1., 0.], [ 0., 0., 1., 0., 0.]]) """ input = numpy.asarray(input) if structure is None: structure = generate_binary_structure(input.ndim, 1) origin = _ni_support._normalize_sequence(origin, input.ndim) structure = numpy.asarray(structure) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] for ii in range(len(origin)): origin[ii] = -origin[ii] if not structure.shape[ii] & 1: origin[ii] -= 1 return _binary_erosion(input, structure, iterations, mask, output, border_value, origin, 1, brute_force) def binary_opening(input, structure=None, iterations=1, output=None, origin=0, mask=None, border_value=0, brute_force=False): """ Multidimensional binary opening with the given structuring element. The *opening* of an input image by a structuring element is the *dilation* of the *erosion* of the image by the structuring element. Parameters ---------- input : array_like Binary array_like to be opened. Non-zero (True) elements form the subset to be opened. structure : array_like, optional Structuring element used for the opening. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one (i.e., only nearest neighbors are connected to the center, diagonally-connected elements are not considered neighbors). iterations : int, optional The erosion step of the opening, then the dilation step are each repeated `iterations` times (one, by default). If `iterations` is less than 1, each operation is repeated until the result does not change anymore. Only an integer of iterations is accepted. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. 
mask : array_like, optional If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. .. versionadded:: 1.1.0 border_value : int (cast to 0 or 1), optional Value at the border in the output array. .. versionadded:: 1.1.0 brute_force : boolean, optional Memory condition: if False, only the pixels whose value was changed in the last iteration are tracked as candidates to be updated in the current iteration; if true all pixels are considered as candidates for update, regardless of what happened in the previous iteration. False by default. .. versionadded:: 1.1.0 Returns ------- binary_opening : ndarray of bools Opening of the input by the structuring element. See Also -------- grey_opening, binary_closing, binary_erosion, binary_dilation, generate_binary_structure Notes ----- *Opening* [1]_ is a mathematical morphology operation [2]_ that consists in the succession of an erosion and a dilation of the input with the same structuring element. Opening, therefore, removes objects smaller than the structuring element. Together with *closing* (`binary_closing`), opening can be used for noise removal. References ---------- .. [1] https://en.wikipedia.org/wiki/Opening_%28morphology%29 .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((5,5), dtype=int) >>> a[1:4, 1:4] = 1; a[4, 4] = 1 >>> a array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 1]]) >>> # Opening removes small objects >>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Opening can also smooth corners >>> ndimage.binary_opening(a).astype(int) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]) >>> # Opening is the dilation of the erosion of the input >>> ndimage.binary_erosion(a).astype(int) array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) >>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]) """ input = numpy.asarray(input) if structure is None: rank = input.ndim structure = generate_binary_structure(rank, 1) tmp = binary_erosion(input, structure, iterations, mask, None, border_value, origin, brute_force) return binary_dilation(tmp, structure, iterations, mask, output, border_value, origin, brute_force) def binary_closing(input, structure=None, iterations=1, output=None, origin=0, mask=None, border_value=0, brute_force=False): """ Multidimensional binary closing with the given structuring element. The *closing* of an input image by a structuring element is the *erosion* of the *dilation* of the image by the structuring element. Parameters ---------- input : array_like Binary array_like to be closed. Non-zero (True) elements form the subset to be closed. structure : array_like, optional Structuring element used for the closing. Non-zero elements are considered True. If no structuring element is provided an element is generated with a square connectivity equal to one (i.e., only nearest neighbors are connected to the center, diagonally-connected elements are not considered neighbors). iterations : int, optional The dilation step of the closing, then the erosion step are each repeated `iterations` times (one, by default). 
If iterations is less than 1, each operations is repeated until the result does not change anymore. Only an integer of iterations is accepted. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int or tuple of ints, optional Placement of the filter, by default 0. mask : array_like, optional If a mask is given, only those elements with a True value at the corresponding mask element are modified at each iteration. .. versionadded:: 1.1.0 border_value : int (cast to 0 or 1), optional Value at the border in the output array. .. versionadded:: 1.1.0 brute_force : boolean, optional Memory condition: if False, only the pixels whose value was changed in the last iteration are tracked as candidates to be updated in the current iteration; if true al pixels are considered as candidates for update, regardless of what happened in the previous iteration. False by default. .. versionadded:: 1.1.0 Returns ------- binary_closing : ndarray of bools Closing of the input by the structuring element. See Also -------- grey_closing, binary_opening, binary_dilation, binary_erosion, generate_binary_structure Notes ----- *Closing* [1]_ is a mathematical morphology operation [2]_ that consists in the succession of a dilation and an erosion of the input with the same structuring element. Closing therefore fills holes smaller than the structuring element. Together with *opening* (`binary_opening`), closing can be used for noise removal. References ---------- .. [1] https://en.wikipedia.org/wiki/Closing_%28morphology%29 .. [2] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((5,5), dtype=int) >>> a[1:-1, 1:-1] = 1; a[2,2] = 0 >>> a array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Closing removes small holes >>> ndimage.binary_closing(a).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Closing is the erosion of the dilation of the input >>> ndimage.binary_dilation(a).astype(int) array([[0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0]]) >>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> a = np.zeros((7,7), dtype=int) >>> a[1:6, 2:5] = 1; a[1:3,3] = 0 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> # In addition to removing holes, closing can also >>> # coarsen boundaries with fine hollows. 
>>> ndimage.binary_closing(a).astype(int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) """ input = numpy.asarray(input) if structure is None: rank = input.ndim structure = generate_binary_structure(rank, 1) tmp = binary_dilation(input, structure, iterations, mask, None, border_value, origin, brute_force) return binary_erosion(tmp, structure, iterations, mask, output, border_value, origin, brute_force) def binary_hit_or_miss(input, structure1=None, structure2=None, output=None, origin1=0, origin2=None): """ Multidimensional binary hit-or-miss transform. The hit-or-miss transform finds the locations of a given pattern inside the input image. Parameters ---------- input : array_like (cast to booleans) Binary image where a pattern is to be detected. structure1 : array_like (cast to booleans), optional Part of the structuring element to be fitted to the foreground (non-zero elements) of `input`. If no value is provided, a structure of square connectivity 1 is chosen. structure2 : array_like (cast to booleans), optional Second part of the structuring element that has to miss completely the foreground. If no value is provided, the complementary of `structure1` is taken. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin1 : int or tuple of ints, optional Placement of the first part of the structuring element `structure1`, by default 0 for a centered structure. origin2 : int or tuple of ints, optional Placement of the second part of the structuring element `structure2`, by default 0 for a centered structure. If a value is provided for `origin1` and not for `origin2`, then `origin2` is set to `origin1`. Returns ------- binary_hit_or_miss : ndarray Hit-or-miss transform of `input` with the given structuring element (`structure1`, `structure2`). See Also -------- binary_erosion References ---------- .. [1] https://en.wikipedia.org/wiki/Hit-or-miss_transform Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((7,7), dtype=int) >>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) >>> structure1 array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]) >>> # Find the matches of structure1 in the array a >>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> # Change the origin of the filter >>> # origin1=1 is equivalent to origin1=(1,1) here >>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\ ... 
origin1=1).astype(int) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) """ input = numpy.asarray(input) if structure1 is None: structure1 = generate_binary_structure(input.ndim, 1) if structure2 is None: structure2 = numpy.logical_not(structure1) origin1 = _ni_support._normalize_sequence(origin1, input.ndim) if origin2 is None: origin2 = origin1 else: origin2 = _ni_support._normalize_sequence(origin2, input.ndim) tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1, 0, False) inplace = isinstance(output, numpy.ndarray) result = _binary_erosion(input, structure2, 1, None, output, 0, origin2, 1, False) if inplace: numpy.logical_not(output, output) numpy.logical_and(tmp1, output, output) else: numpy.logical_not(result, result) return numpy.logical_and(tmp1, result) def binary_propagation(input, structure=None, mask=None, output=None, border_value=0, origin=0): """ Multidimensional binary propagation with the given structuring element. Parameters ---------- input : array_like Binary image to be propagated inside `mask`. structure : array_like, optional Structuring element used in the successive dilations. The output may depend on the structuring element, especially if `mask` has several connex components. If no structuring element is provided, an element is generated with a squared connectivity equal to one. mask : array_like, optional Binary mask defining the region into which `input` is allowed to propagate. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. border_value : int (cast to 0 or 1), optional Value at the border in the output array. origin : int or tuple of ints, optional Placement of the filter, by default 0. Returns ------- binary_propagation : ndarray Binary propagation of `input` inside `mask`. Notes ----- This function is functionally equivalent to calling binary_dilation with the number of iterations less than one: iterative dilation until the result does not change anymore. The succession of an erosion and propagation inside the original image can be used instead of an *opening* for deleting small objects while keeping the contours of larger objects untouched. References ---------- .. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15. .. [2] I.T. Young, J.J. Gerbrands, and L.J. 
van Vliet, "Fundamentals of image processing", 1998 ftp://qiftp.tudelft.nl/DIPimage/docs/FIP2.3.pdf Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> input = np.zeros((8, 8), dtype=int) >>> input[2, 2] = 1 >>> mask = np.zeros((8, 8), dtype=int) >>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1 >>> input array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) >>> mask array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]) >>> ndimage.binary_propagation(input, mask=mask).astype(int) array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_propagation(input, mask=mask,\\ ... structure=np.ones((3,3))).astype(int) array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]) >>> # Comparison between opening and erosion+propagation >>> a = np.zeros((6,6), dtype=int) >>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1 >>> a array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1]]) >>> ndimage.binary_opening(a).astype(int) array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> b = ndimage.binary_erosion(a) >>> b.astype(int) array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]) >>> ndimage.binary_propagation(b, mask=a).astype(int) array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0]]) """ return binary_dilation(input, structure, -1, mask, output, border_value, origin) def binary_fill_holes(input, structure=None, output=None, origin=0): """ Fill the holes in binary objects. Parameters ---------- input : array_like N-D binary array with holes to be filled structure : array_like, optional Structuring element used in the computation; large-size elements make computations faster but may miss holes separated from the background by thin regions. The default element (with a square connectivity equal to one) yields the intuitive result where all holes in the input have been filled. output : ndarray, optional Array of the same shape as input, into which the output is placed. By default, a new array is created. origin : int, tuple of ints, optional Position of the structuring element. Returns ------- out : ndarray Transformation of the initial image `input` where holes have been filled. See Also -------- binary_dilation, binary_propagation, label Notes ----- The algorithm used in this function consists in invading the complementary of the shapes in `input` from the outer boundary of the image, using binary dilations. Holes are not connected to the boundary and are therefore not invaded. The result is the complementary subset of the invaded region. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((5, 5), dtype=int) >>> a[1:4, 1:4] = 1 >>> a[2,2] = 0 >>> a array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> ndimage.binary_fill_holes(a).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) >>> # Too big structuring element >>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int) array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 0, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]]) """ mask = numpy.logical_not(input) tmp = numpy.zeros(mask.shape, bool) inplace = isinstance(output, numpy.ndarray) if inplace: binary_dilation(tmp, structure, -1, mask, output, 1, origin) numpy.logical_not(output, output) else: output = binary_dilation(tmp, structure, -1, mask, None, 1, origin) numpy.logical_not(output, output) return output def grey_erosion(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a greyscale erosion, using either a structuring element, or a footprint corresponding to a flat structuring element. Grayscale erosion is a mathematical morphology operation. For the simple case of a full and flat structuring element, it can be viewed as a minimum filter over a sliding window. Parameters ---------- input : array_like Array over which the grayscale erosion is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale erosion. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale erosion. Non-zero values give the set of neighbors of the center over which the minimum is chosen. structure : array of ints, optional Structuring element used for the grayscale erosion. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the erosion may be provided. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- output : ndarray Grayscale erosion of `input`. See Also -------- binary_erosion, grey_dilation, grey_opening, grey_closing generate_binary_structure, minimum_filter Notes ----- The grayscale erosion of an image input by a structuring element s defined over a domain E is given by: (input+s)(x) = min {input(y) - s(x-y), for y in E} In particular, for structuring elements defined as s(y) = 0 for y in E, the grayscale erosion computes the minimum of the input image inside a sliding window defined by E. Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_. References ---------- .. [1] https://en.wikipedia.org/wiki/Erosion_%28morphology%29 .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((7,7), dtype=int) >>> a[1:6, 1:6] = 3 >>> a[4,4] = 2; a[2,3] = 1 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 3, 3, 3, 3, 3, 0], [0, 3, 3, 1, 3, 3, 0], [0, 3, 3, 3, 3, 3, 0], [0, 3, 3, 3, 2, 3, 0], [0, 3, 3, 3, 3, 3, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_erosion(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 3, 2, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> footprint = ndimage.generate_binary_structure(2, 1) >>> footprint array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> # Diagonally-connected elements are not considered neighbors >>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 3, 1, 2, 0, 0], [0, 0, 3, 2, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) """ if size is None and footprint is None and structure is None: raise ValueError("size, footprint, or structure must be specified") return _filters._min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, 1) def grey_dilation(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Calculate a greyscale dilation, using either a structuring element, or a footprint corresponding to a flat structuring element. Grayscale dilation is a mathematical morphology operation. For the simple case of a full and flat structuring element, it can be viewed as a maximum filter over a sliding window. Parameters ---------- input : array_like Array over which the grayscale dilation is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale dilation. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale dilation. Non-zero values give the set of neighbors of the center over which the maximum is chosen. structure : array of ints, optional Structuring element used for the grayscale dilation. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the dilation may be provided. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- grey_dilation : ndarray Grayscale dilation of `input`. See Also -------- binary_dilation, grey_erosion, grey_closing, grey_opening generate_binary_structure, maximum_filter Notes ----- The grayscale dilation of an image input by a structuring element s defined over a domain E is given by: (input+s)(x) = max {input(y) + s(x-y), for y in E} In particular, for structuring elements defined as s(y) = 0 for y in E, the grayscale dilation computes the maximum of the input image inside a sliding window defined by E. Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_. References ---------- .. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29 .. 
[2] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((7,7), dtype=int) >>> a[2:5, 2:5] = 1 >>> a[4,4] = 2; a[2,3] = 3 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 3, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_dilation(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_dilation(a, footprint=np.ones((3,3))) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> s = ndimage.generate_binary_structure(2,1) >>> s array([[False, True, False], [ True, True, True], [False, True, False]], dtype=bool) >>> ndimage.grey_dilation(a, footprint=s) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 3, 1, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 1, 3, 2, 1, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3))) array([[1, 1, 1, 1, 1, 1, 1], [1, 2, 4, 4, 4, 2, 1], [1, 2, 4, 4, 4, 2, 1], [1, 2, 4, 4, 4, 3, 1], [1, 2, 2, 3, 3, 3, 1], [1, 2, 2, 3, 3, 3, 1], [1, 1, 1, 1, 1, 1, 1]]) """ if size is None and footprint is None and structure is None: raise ValueError("size, footprint, or structure must be specified") if structure is not None: structure = numpy.asarray(structure) structure = structure[tuple([slice(None, None, -1)] * structure.ndim)] if footprint is not None: footprint = numpy.asarray(footprint) footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)] input = numpy.asarray(input) origin = _ni_support._normalize_sequence(origin, input.ndim) for ii in range(len(origin)): origin[ii] = -origin[ii] if footprint is not None: sz = footprint.shape[ii] elif structure is not None: sz = structure.shape[ii] elif numpy.isscalar(size): sz = size else: sz = size[ii] if not sz & 1: origin[ii] -= 1 return _filters._min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, 0) def grey_opening(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multidimensional grayscale opening. A grayscale opening consists in the succession of a grayscale erosion, and a grayscale dilation. Parameters ---------- input : array_like Array over which the grayscale opening is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale opening. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale opening. structure : array of ints, optional Structuring element used for the grayscale opening. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the opening may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. 
Default 0 Returns ------- grey_opening : ndarray Result of the grayscale opening of `input` with `structure`. See Also -------- binary_opening, grey_dilation, grey_erosion, grey_closing generate_binary_structure Notes ----- The action of a grayscale opening with a flat structuring element amounts to smoothen high local maxima, whereas binary opening erases small objects. References ---------- .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.arange(36).reshape((6,6)) >>> a[3, 3] = 50 >>> a array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 50, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35]]) >>> ndimage.grey_opening(a, size=(3,3)) array([[ 0, 1, 2, 3, 4, 4], [ 6, 7, 8, 9, 10, 10], [12, 13, 14, 15, 16, 16], [18, 19, 20, 22, 22, 22], [24, 25, 26, 27, 28, 28], [24, 25, 26, 27, 28, 28]]) >>> # Note that the local maximum a[3,3] has disappeared """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) tmp = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) return grey_dilation(tmp, size, footprint, structure, output, mode, cval, origin) def grey_closing(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multidimensional grayscale closing. A grayscale closing consists in the succession of a grayscale dilation, and a grayscale erosion. Parameters ---------- input : array_like Array over which the grayscale closing is to be computed. size : tuple of ints Shape of a flat and full structuring element used for the grayscale closing. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the grayscale closing. structure : array of ints, optional Structuring element used for the grayscale closing. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the closing may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- grey_closing : ndarray Result of the grayscale closing of `input` with `structure`. See Also -------- binary_closing, grey_dilation, grey_erosion, grey_opening, generate_binary_structure Notes ----- The action of a grayscale closing with a flat structuring element amounts to smoothen deep local minima, whereas binary closing fills small holes. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.arange(36).reshape((6,6)) >>> a[3,3] = 0 >>> a array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 0, 22, 23], [24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35]]) >>> ndimage.grey_closing(a, size=(3,3)) array([[ 7, 7, 8, 9, 10, 11], [ 7, 7, 8, 9, 10, 11], [13, 13, 14, 15, 16, 17], [19, 19, 20, 20, 22, 23], [25, 25, 26, 27, 28, 29], [31, 31, 32, 33, 34, 35]]) >>> # Note that the local minimum a[3,3] has disappeared """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) return grey_erosion(tmp, size, footprint, structure, output, mode, cval, origin) def morphological_gradient(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multidimensional morphological gradient. The morphological gradient is calculated as the difference between a dilation and an erosion of the input with a given structuring element. Parameters ---------- input : array_like Array over which to compute the morphlogical gradient. size : tuple of ints Shape of a flat and full structuring element used for the mathematical morphology operations. Optional if `footprint` or `structure` is provided. A larger `size` yields a more blurred gradient. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the morphology operations. Larger footprints give a more blurred morphological gradient. structure : array of ints, optional Structuring element used for the morphology operations. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the morphological gradient may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- morphological_gradient : ndarray Morphological gradient of `input`. See Also -------- grey_dilation, grey_erosion, gaussian_gradient_magnitude Notes ----- For a flat structuring element, the morphological gradient computed at a given point corresponds to the maximal difference between elements of the input among the elements covered by the structuring element centered on the point. References ---------- .. [1] https://en.wikipedia.org/wiki/Mathematical_morphology Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.zeros((7,7), dtype=int) >>> a[2:5, 2:5] = 1 >>> ndimage.morphological_gradient(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> # The morphological gradient is computed as the difference >>> # between a dilation and an erosion >>> ndimage.grey_dilation(a, size=(3,3)) -\\ ... 
ndimage.grey_erosion(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 0, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> a = np.zeros((7,7), dtype=int) >>> a[2:5, 2:5] = 1 >>> a[4,4] = 2; a[2,3] = 3 >>> a array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 3, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]) >>> ndimage.morphological_gradient(a, size=(3,3)) array([[0, 0, 0, 0, 0, 0, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 3, 3, 1, 0], [0, 1, 3, 2, 3, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0]]) """ tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(input, size, footprint, structure, output, mode, cval, origin) return numpy.subtract(tmp, output, output) else: return (tmp - grey_erosion(input, size, footprint, structure, None, mode, cval, origin)) def morphological_laplace(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multidimensional morphological laplace. Parameters ---------- input : array_like Input. size : int or sequence of ints, optional See `structure`. footprint : bool or ndarray, optional See `structure`. structure : structure, optional Either `size`, `footprint`, or the `structure` must be provided. output : ndarray, optional An output array can optionally be provided. mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional The mode parameter determines how the array borders are handled. For 'constant' mode, values beyond borders are set to be `cval`. Default is 'reflect'. cval : scalar, optional Value to fill past edges of input if mode is 'constant'. Default is 0.0 origin : origin, optional The origin parameter controls the placement of the filter. Returns ------- morphological_laplace : ndarray Output """ tmp1 = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) if isinstance(output, numpy.ndarray): grey_erosion(input, size, footprint, structure, output, mode, cval, origin) numpy.add(tmp1, output, output) numpy.subtract(output, input, output) return numpy.subtract(output, input, output) else: tmp2 = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) numpy.add(tmp1, tmp2, tmp2) numpy.subtract(tmp2, input, tmp2) numpy.subtract(tmp2, input, tmp2) return tmp2 def white_tophat(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multidimensional white tophat filter. Parameters ---------- input : array_like Input. size : tuple of ints Shape of a flat and full structuring element used for the filter. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of elements of a flat structuring element used for the white tophat filter. structure : array of ints, optional Structuring element used for the filter. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the filter may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. 
origin : scalar, optional The `origin` parameter controls the placement of the filter. Default is 0. Returns ------- output : ndarray Result of the filter of `input` with `structure`. See Also -------- black_tophat Examples -------- Subtract gray background from a bright peak. >>> from scipy.ndimage import generate_binary_structure, white_tophat >>> import numpy as np >>> square = generate_binary_structure(rank=2, connectivity=3) >>> bright_on_gray = np.array([[2, 3, 3, 3, 2], ... [3, 4, 5, 4, 3], ... [3, 5, 9, 5, 3], ... [3, 4, 5, 4, 3], ... [2, 3, 3, 3, 2]]) >>> white_tophat(input=bright_on_gray, structure=square) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 5, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]) """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) tmp = grey_erosion(input, size, footprint, structure, None, mode, cval, origin) tmp = grey_dilation(tmp, size, footprint, structure, output, mode, cval, origin) if tmp is None: tmp = output if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: numpy.bitwise_xor(input, tmp, out=tmp) else: numpy.subtract(input, tmp, out=tmp) return tmp def black_tophat(input, size=None, footprint=None, structure=None, output=None, mode="reflect", cval=0.0, origin=0): """ Multidimensional black tophat filter. Parameters ---------- input : array_like Input. size : tuple of ints, optional Shape of a flat and full structuring element used for the filter. Optional if `footprint` or `structure` is provided. footprint : array of ints, optional Positions of non-infinite elements of a flat structuring element used for the black tophat filter. structure : array of ints, optional Structuring element used for the filter. `structure` may be a non-flat structuring element. output : array, optional An array used for storing the output of the filter may be provided. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the array borders are handled, where `cval` is the value when mode is equal to 'constant'. Default is 'reflect' cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0. origin : scalar, optional The `origin` parameter controls the placement of the filter. Default 0 Returns ------- black_tophat : ndarray Result of the filter of `input` with `structure`. See Also -------- white_tophat, grey_opening, grey_closing Examples -------- Change dark peak to bright peak and subtract background. >>> from scipy.ndimage import generate_binary_structure, black_tophat >>> import numpy as np >>> square = generate_binary_structure(rank=2, connectivity=3) >>> dark_on_gray = np.array([[7, 6, 6, 6, 7], ... [6, 5, 4, 5, 6], ... [6, 4, 0, 4, 6], ... [6, 5, 4, 5, 6], ... 
[7, 6, 6, 6, 7]]) >>> black_tophat(input=dark_on_gray, structure=square) array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 5, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]) """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) tmp = grey_dilation(input, size, footprint, structure, None, mode, cval, origin) tmp = grey_erosion(tmp, size, footprint, structure, output, mode, cval, origin) if tmp is None: tmp = output if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_: numpy.bitwise_xor(tmp, input, out=tmp) else: numpy.subtract(tmp, input, out=tmp) return tmp def distance_transform_bf(input, metric="euclidean", sampling=None, return_distances=True, return_indices=False, distances=None, indices=None): """ Distance transform function by a brute force algorithm. This function calculates the distance transform of the `input`, by replacing each foreground (non-zero) element, with its shortest distance to the background (any zero-valued element). In addition to the distance transform, the feature transform can be calculated. In this case the index of the closest background element to each foreground element is returned in a separate array. Parameters ---------- input : array_like Input metric : {'euclidean', 'taxicab', 'chessboard'}, optional 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. The default is 'euclidean'. sampling : float, or sequence of float, optional This parameter is only used when `metric` is 'euclidean'. Spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. return_distances : bool, optional Whether to calculate the distance transform. Default is True. return_indices : bool, optional Whether to calculate the feature transform. Default is False. distances : ndarray, optional An output array to store the calculated distance transform, instead of returning it. `return_distances` must be True. It must be the same shape as `input`, and of type float64 if `metric` is 'euclidean', uint32 otherwise. indices : int32 ndarray, optional An output array to store the calculated feature transform, instead of returning it. `return_indicies` must be True. Its shape must be `(input.ndim,) + input.shape`. Returns ------- distances : ndarray, optional The calculated distance transform. Returned only when `return_distances` is True and `distances` is not supplied. It will have the same shape as the input array. indices : int32 ndarray, optional The calculated feature transform. It has an input-shaped array for each dimension of the input. See distance_transform_edt documentation for an example. Returned only when `return_indices` is True and `indices` is not supplied. See Also -------- distance_transform_cdt : Faster distance transform for taxicab and chessboard metrics distance_transform_edt : Faster distance transform for euclidean metric Notes ----- This function employs a slow brute force algorithm. See also the function `distance_transform_cdt` for more efficient taxicab [1]_ and chessboard algorithms [2]_. References ---------- .. [1] Taxicab distance. Wikipedia, 2023. https://en.wikipedia.org/wiki/Taxicab_geometry .. [2] Chessboard distance. Wikipedia, 2023. https://en.wikipedia.org/wiki/Chebyshev_distance Examples -------- Import the necessary modules. 
>>> import numpy as np >>> from scipy.ndimage import distance_transform_bf >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1 import ImageGrid First, we create a toy binary image. >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): ... # fill circular area with 1 ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 ... circle_shape = np.sqrt(circle) < radius ... image[circle_shape] = fillvalue ... return image >>> image = np.zeros((100, 100), dtype=np.uint8) >>> image[35:65, 20:80] = 1 >>> image = add_circle(28, 65, 10, image) >>> image = add_circle(37, 30, 10, image) >>> image = add_circle(70, 45, 20, image) >>> image = add_circle(45, 80, 10, image) Next, we set up the figure. >>> fig = plt.figure(figsize=(8, 8)) # set up the figure structure >>> grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=(0.4, 0.3), ... label_mode="1", share_all=True, ... cbar_location="right", cbar_mode="each", ... cbar_size="7%", cbar_pad="2%") >>> for ax in grid: ... ax.axis('off') # remove axes from images The top left image is the original binary image. >>> binary_image = grid[0].imshow(image, cmap='gray') >>> cbar_binary_image = grid.cbar_axes[0].colorbar(binary_image) >>> cbar_binary_image.set_ticks([0, 1]) >>> grid[0].set_title("Binary image: foreground in white") The distance transform calculates the distance between foreground pixels and the image background according to a distance metric. Available metrics in `distance_transform_bf` are: ``euclidean`` (default), ``taxicab`` and ``chessboard``. The top right image contains the distance transform based on the ``euclidean`` metric. >>> distance_transform_euclidean = distance_transform_bf(image) >>> euclidean_transform = grid[1].imshow(distance_transform_euclidean, ... cmap='gray') >>> cbar_euclidean = grid.cbar_axes[1].colorbar(euclidean_transform) >>> colorbar_ticks = [0, 10, 20] >>> cbar_euclidean.set_ticks(colorbar_ticks) >>> grid[1].set_title("Euclidean distance") The lower left image contains the distance transform using the ``taxicab`` metric. >>> distance_transform_taxicab = distance_transform_bf(image, ... metric='taxicab') >>> taxicab_transformation = grid[2].imshow(distance_transform_taxicab, ... cmap='gray') >>> cbar_taxicab = grid.cbar_axes[2].colorbar(taxicab_transformation) >>> cbar_taxicab.set_ticks(colorbar_ticks) >>> grid[2].set_title("Taxicab distance") Finally, the lower right image contains the distance transform using the ``chessboard`` metric. >>> distance_transform_cb = distance_transform_bf(image, ... metric='chessboard') >>> chessboard_transformation = grid[3].imshow(distance_transform_cb, ... 
cmap='gray') >>> cbar_taxicab = grid.cbar_axes[3].colorbar(chessboard_transformation) >>> cbar_taxicab.set_ticks(colorbar_ticks) >>> grid[3].set_title("Chessboard distance") >>> plt.show() """ ft_inplace = isinstance(indices, numpy.ndarray) dt_inplace = isinstance(distances, numpy.ndarray) _distance_tranform_arg_check( dt_inplace, ft_inplace, return_distances, return_indices ) tmp1 = numpy.asarray(input) != 0 struct = generate_binary_structure(tmp1.ndim, tmp1.ndim) tmp2 = binary_dilation(tmp1, struct) tmp2 = numpy.logical_xor(tmp1, tmp2) tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8) metric = metric.lower() if metric == 'euclidean': metric = 1 elif metric in ['taxicab', 'cityblock', 'manhattan']: metric = 2 elif metric == 'chessboard': metric = 3 else: raise RuntimeError('distance metric not supported') if sampling is not None: sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim) sampling = numpy.asarray(sampling, dtype=numpy.float64) if not sampling.flags.contiguous: sampling = sampling.copy() if return_indices: ft = numpy.zeros(tmp1.shape, dtype=numpy.int32) else: ft = None if return_distances: if distances is None: if metric == 1: dt = numpy.zeros(tmp1.shape, dtype=numpy.float64) else: dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32) else: if distances.shape != tmp1.shape: raise RuntimeError('distances array has wrong shape') if metric == 1: if distances.dtype.type != numpy.float64: raise RuntimeError('distances array must be float64') else: if distances.dtype.type != numpy.uint32: raise RuntimeError('distances array must be uint32') dt = distances else: dt = None _nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft) if return_indices: if isinstance(indices, numpy.ndarray): if indices.dtype.type != numpy.int32: raise RuntimeError('indices array must be int32') if indices.shape != (tmp1.ndim,) + tmp1.shape: raise RuntimeError('indices array has wrong shape') tmp2 = indices else: tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32) ft = numpy.ravel(ft) for ii in range(tmp2.shape[0]): rtmp = numpy.ravel(tmp2[ii, ...])[ft] rtmp.shape = tmp1.shape tmp2[ii, ...] = rtmp ft = tmp2 # construct and return the result result = [] if return_distances and not dt_inplace: result.append(dt) if return_indices and not ft_inplace: result.append(ft) if len(result) == 2: return tuple(result) elif len(result) == 1: return result[0] else: return None def distance_transform_cdt(input, metric='chessboard', return_distances=True, return_indices=False, distances=None, indices=None): """ Distance transform for chamfer type of transforms. This function calculates the distance transform of the `input`, by replacing each foreground (non-zero) element, with its shortest distance to the background (any zero-valued element). In addition to the distance transform, the feature transform can be calculated. In this case the index of the closest background element to each foreground element is returned in a separate array. Parameters ---------- input : array_like Input. Values of 0 are treated as background. metric : {'chessboard', 'taxicab'} or array_like, optional The `metric` determines the type of chamfering that is done. If the `metric` is equal to 'taxicab' a structure is generated using `generate_binary_structure` with a squared distance equal to 1. If the `metric` is equal to 'chessboard', a `metric` is generated using `generate_binary_structure` with a squared distance equal to the dimensionality of the array. 
These choices correspond to the common interpretations of the 'taxicab' and the 'chessboard' distance metrics in two dimensions. A custom metric may be provided, in the form of a matrix where each dimension has a length of three. 'cityblock' and 'manhattan' are also valid, and map to 'taxicab'. The default is 'chessboard'. return_distances : bool, optional Whether to calculate the distance transform. Default is True. return_indices : bool, optional Whether to calculate the feature transform. Default is False. distances : int32 ndarray, optional An output array to store the calculated distance transform, instead of returning it. `return_distances` must be True. It must be the same shape as `input`. indices : int32 ndarray, optional An output array to store the calculated feature transform, instead of returning it. `return_indicies` must be True. Its shape must be `(input.ndim,) + input.shape`. Returns ------- distances : int32 ndarray, optional The calculated distance transform. Returned only when `return_distances` is True, and `distances` is not supplied. It will have the same shape as the input array. indices : int32 ndarray, optional The calculated feature transform. It has an input-shaped array for each dimension of the input. See distance_transform_edt documentation for an example. Returned only when `return_indices` is True, and `indices` is not supplied. See Also -------- distance_transform_edt : Fast distance transform for euclidean metric distance_transform_bf : Distance transform for different metrics using a slower brute force algorithm Examples -------- Import the necessary modules. >>> import numpy as np >>> from scipy.ndimage import distance_transform_cdt >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1 import ImageGrid First, we create a toy binary image. >>> def add_circle(center_x, center_y, radius, image, fillvalue=1): ... # fill circular area with 1 ... xx, yy = np.mgrid[:image.shape[0], :image.shape[1]] ... circle = (xx - center_x) ** 2 + (yy - center_y) ** 2 ... circle_shape = np.sqrt(circle) < radius ... image[circle_shape] = fillvalue ... return image >>> image = np.zeros((100, 100), dtype=np.uint8) >>> image[35:65, 20:80] = 1 >>> image = add_circle(28, 65, 10, image) >>> image = add_circle(37, 30, 10, image) >>> image = add_circle(70, 45, 20, image) >>> image = add_circle(45, 80, 10, image) Next, we set up the figure. >>> fig = plt.figure(figsize=(5, 15)) >>> grid = ImageGrid(fig, 111, nrows_ncols=(3, 1), axes_pad=(0.5, 0.3), ... label_mode="1", share_all=True, ... cbar_location="right", cbar_mode="each", ... cbar_size="7%", cbar_pad="2%") >>> for ax in grid: ... ax.axis('off') >>> top, middle, bottom = grid >>> colorbar_ticks = [0, 10, 20] The top image contains the original binary image. >>> binary_image = top.imshow(image, cmap='gray') >>> cbar_binary_image = top.cax.colorbar(binary_image) >>> cbar_binary_image.set_ticks([0, 1]) >>> top.set_title("Binary image: foreground in white") The middle image contains the distance transform using the ``taxicab`` metric. >>> distance_taxicab = distance_transform_cdt(image, metric="taxicab") >>> taxicab_transform = middle.imshow(distance_taxicab, cmap='gray') >>> cbar_taxicab = middle.cax.colorbar(taxicab_transform) >>> cbar_taxicab.set_ticks(colorbar_ticks) >>> middle.set_title("Taxicab metric") The bottom image contains the distance transform using the ``chessboard`` metric. >>> distance_chessboard = distance_transform_cdt(image, ... 
metric="chessboard") >>> chessboard_transform = bottom.imshow(distance_chessboard, cmap='gray') >>> cbar_chessboard = bottom.cax.colorbar(chessboard_transform) >>> cbar_chessboard.set_ticks(colorbar_ticks) >>> bottom.set_title("Chessboard metric") >>> plt.tight_layout() >>> plt.show() """ ft_inplace = isinstance(indices, numpy.ndarray) dt_inplace = isinstance(distances, numpy.ndarray) _distance_tranform_arg_check( dt_inplace, ft_inplace, return_distances, return_indices ) input = numpy.asarray(input) if isinstance(metric, str): if metric in ['taxicab', 'cityblock', 'manhattan']: rank = input.ndim metric = generate_binary_structure(rank, 1) elif metric == 'chessboard': rank = input.ndim metric = generate_binary_structure(rank, rank) else: raise ValueError('invalid metric provided') else: try: metric = numpy.asarray(metric) except Exception as e: raise ValueError('invalid metric provided') from e for s in metric.shape: if s != 3: raise ValueError('metric sizes must be equal to 3') if not metric.flags.contiguous: metric = metric.copy() if dt_inplace: if distances.dtype.type != numpy.int32: raise ValueError('distances must be of int32 type') if distances.shape != input.shape: raise ValueError('distances has wrong shape') dt = distances dt[...] = numpy.where(input, -1, 0).astype(numpy.int32) else: dt = numpy.where(input, -1, 0).astype(numpy.int32) rank = dt.ndim if return_indices: sz = numpy.prod(dt.shape, axis=0) ft = numpy.arange(sz, dtype=numpy.int32) ft.shape = dt.shape else: ft = None _nd_image.distance_transform_op(metric, dt, ft) dt = dt[tuple([slice(None, None, -1)] * rank)] if return_indices: ft = ft[tuple([slice(None, None, -1)] * rank)] _nd_image.distance_transform_op(metric, dt, ft) dt = dt[tuple([slice(None, None, -1)] * rank)] if return_indices: ft = ft[tuple([slice(None, None, -1)] * rank)] ft = numpy.ravel(ft) if ft_inplace: if indices.dtype.type != numpy.int32: raise ValueError('indices array must be int32') if indices.shape != (dt.ndim,) + dt.shape: raise ValueError('indices array has wrong shape') tmp = indices else: tmp = numpy.indices(dt.shape, dtype=numpy.int32) for ii in range(tmp.shape[0]): rtmp = numpy.ravel(tmp[ii, ...])[ft] rtmp.shape = dt.shape tmp[ii, ...] = rtmp ft = tmp # construct and return the result result = [] if return_distances and not dt_inplace: result.append(dt) if return_indices and not ft_inplace: result.append(ft) if len(result) == 2: return tuple(result) elif len(result) == 1: return result[0] else: return None def distance_transform_edt(input, sampling=None, return_distances=True, return_indices=False, distances=None, indices=None): """ Exact Euclidean distance transform. This function calculates the distance transform of the `input`, by replacing each foreground (non-zero) element, with its shortest distance to the background (any zero-valued element). In addition to the distance transform, the feature transform can be calculated. In this case the index of the closest background element to each foreground element is returned in a separate array. Parameters ---------- input : array_like Input data to transform. Can be any type but will be converted into binary: 1 wherever input equates to True, 0 elsewhere. sampling : float, or sequence of float, optional Spacing of elements along each dimension. If a sequence, must be of length equal to the input rank; if a single number, this is used for all axes. If not specified, a grid spacing of unity is implied. return_distances : bool, optional Whether to calculate the distance transform. Default is True. 
return_indices : bool, optional Whether to calculate the feature transform. Default is False. distances : float64 ndarray, optional An output array to store the calculated distance transform, instead of returning it. `return_distances` must be True. It must be the same shape as `input`. indices : int32 ndarray, optional An output array to store the calculated feature transform, instead of returning it. `return_indicies` must be True. Its shape must be `(input.ndim,) + input.shape`. Returns ------- distances : float64 ndarray, optional The calculated distance transform. Returned only when `return_distances` is True and `distances` is not supplied. It will have the same shape as the input array. indices : int32 ndarray, optional The calculated feature transform. It has an input-shaped array for each dimension of the input. See example below. Returned only when `return_indices` is True and `indices` is not supplied. Notes ----- The Euclidean distance transform gives values of the Euclidean distance:: n y_i = sqrt(sum (x[i]-b[i])**2) i where b[i] is the background point (value 0) with the smallest Euclidean distance to input points x[i], and n is the number of dimensions. Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.array(([0,1,1,1,1], ... [0,0,1,1,1], ... [0,1,1,1,1], ... [0,1,1,1,0], ... [0,1,1,0,0])) >>> ndimage.distance_transform_edt(a) array([[ 0. , 1. , 1.4142, 2.2361, 3. ], [ 0. , 0. , 1. , 2. , 2. ], [ 0. , 1. , 1.4142, 1.4142, 1. ], [ 0. , 1. , 1.4142, 1. , 0. ], [ 0. , 1. , 1. , 0. , 0. ]]) With a sampling of 2 units along x, 1 along y: >>> ndimage.distance_transform_edt(a, sampling=[2,1]) array([[ 0. , 1. , 2. , 2.8284, 3.6056], [ 0. , 0. , 1. , 2. , 3. ], [ 0. , 1. , 2. , 2.2361, 2. ], [ 0. , 1. , 2. , 1. , 0. ], [ 0. , 1. , 1. , 0. , 0. ]]) Asking for indices as well: >>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True) >>> inds array([[[0, 0, 1, 1, 3], [1, 1, 1, 1, 3], [2, 2, 1, 3, 3], [3, 3, 4, 4, 3], [4, 4, 4, 4, 4]], [[0, 0, 1, 1, 4], [0, 1, 1, 1, 4], [0, 0, 1, 4, 4], [0, 0, 3, 3, 4], [0, 0, 3, 3, 4]]]) With arrays provided for inplace outputs: >>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32) >>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices) array([[ 0. , 1. , 1.4142, 2.2361, 3. ], [ 0. , 0. , 1. , 2. , 2. ], [ 0. , 1. , 1.4142, 1.4142, 1. ], [ 0. , 1. , 1.4142, 1. , 0. ], [ 0. , 1. , 1. , 0. , 0. 
]]) >>> indices array([[[0, 0, 1, 1, 3], [1, 1, 1, 1, 3], [2, 2, 1, 3, 3], [3, 3, 4, 4, 3], [4, 4, 4, 4, 4]], [[0, 0, 1, 1, 4], [0, 1, 1, 1, 4], [0, 0, 1, 4, 4], [0, 0, 3, 3, 4], [0, 0, 3, 3, 4]]]) """ ft_inplace = isinstance(indices, numpy.ndarray) dt_inplace = isinstance(distances, numpy.ndarray) _distance_tranform_arg_check( dt_inplace, ft_inplace, return_distances, return_indices ) # calculate the feature transform input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8)) if sampling is not None: sampling = _ni_support._normalize_sequence(sampling, input.ndim) sampling = numpy.asarray(sampling, dtype=numpy.float64) if not sampling.flags.contiguous: sampling = sampling.copy() if ft_inplace: ft = indices if ft.shape != (input.ndim,) + input.shape: raise RuntimeError('indices array has wrong shape') if ft.dtype.type != numpy.int32: raise RuntimeError('indices array must be int32') else: ft = numpy.zeros((input.ndim,) + input.shape, dtype=numpy.int32) _nd_image.euclidean_feature_transform(input, sampling, ft) # if requested, calculate the distance transform if return_distances: dt = ft - numpy.indices(input.shape, dtype=ft.dtype) dt = dt.astype(numpy.float64) if sampling is not None: for ii in range(len(sampling)): dt[ii, ...] *= sampling[ii] numpy.multiply(dt, dt, dt) if dt_inplace: dt = numpy.add.reduce(dt, axis=0) if distances.shape != dt.shape: raise RuntimeError('distances array has wrong shape') if distances.dtype.type != numpy.float64: raise RuntimeError('distances array must be float64') numpy.sqrt(dt, distances) else: dt = numpy.add.reduce(dt, axis=0) dt = numpy.sqrt(dt) # construct and return the result result = [] if return_distances and not dt_inplace: result.append(dt) if return_indices and not ft_inplace: result.append(ft) if len(result) == 2: return tuple(result) elif len(result) == 1: return result[0] else: return None def _distance_tranform_arg_check(distances_out, indices_out, return_distances, return_indices): """Raise a RuntimeError if the arguments are invalid""" error_msgs = [] if (not return_distances) and (not return_indices): error_msgs.append( 'at least one of return_distances/return_indices must be True') if distances_out and not return_distances: error_msgs.append( 'return_distances must be True if distances is supplied' ) if indices_out and not return_indices: error_msgs.append('return_indices must be True if indices is supplied') if error_msgs: raise RuntimeError(', '.join(error_msgs))
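# Illustrative sketch, not part of the module above: the tophat filters defined
# earlier reduce to simple identities, white_tophat(x) == x - grey_opening(x) and
# black_tophat(x) == grey_closing(x) - x, which follows from the erosion/dilation
# order used in their implementations. The array below is hypothetical test data.
import numpy as np
from scipy import ndimage

img = np.array([[2, 3, 3, 3, 2],
                [3, 4, 5, 4, 3],
                [3, 5, 9, 5, 3],
                [3, 4, 5, 4, 3],
                [2, 3, 3, 3, 2]])
# white tophat: input minus its grayscale opening
assert np.array_equal(ndimage.white_tophat(img, size=(3, 3)),
                      img - ndimage.grey_opening(img, size=(3, 3)))
# black tophat: grayscale closing minus the input
assert np.array_equal(ndimage.black_tophat(img, size=(3, 3)),
                      ndimage.grey_closing(img, size=(3, 3)) - img)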
94,837
36.678983
90
py
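For the 'taxicab' and 'chessboard' metrics the two-pass chamfer algorithm is exact, so the brute-force and chamfer transforms above should agree; a small cross-check (the input array is hypothetical test data):

>>> import numpy as np
>>> from scipy.ndimage import distance_transform_bf, distance_transform_cdt
>>> a = np.array([[0, 1, 1, 1],
...               [0, 1, 1, 1],
...               [0, 0, 1, 1]])
>>> np.array_equal(distance_transform_bf(a, metric='taxicab'),
...                distance_transform_cdt(a, metric='taxicab'))
True
>>> np.array_equal(distance_transform_bf(a, metric='chessboard'),
...                distance_transform_cdt(a, metric='chessboard'))
True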
scipy
scipy-main/scipy/ndimage/fourier.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

import warnings
from . import _fourier


__all__ = [  # noqa: F822
    'fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
    'fourier_shift'
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.ndimage.fourier is deprecated and has no attribute "
            f"{name}. Try looking in scipy.ndimage instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
                  "the `scipy.ndimage.fourier` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_fourier, name)
816
26.233333
77
py
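The module-level `__getattr__` above is the usual SciPy deprecation shim: access through the old namespace still resolves to the public object but emits a `DeprecationWarning`. A sketch of that behaviour, assuming a SciPy version in which `scipy.ndimage.fourier` still exists:

>>> import warnings
>>> from scipy import ndimage
>>> with warnings.catch_warnings(record=True) as caught:
...     warnings.simplefilter("always")
...     from scipy.ndimage.fourier import fourier_shift  # deprecated import path
>>> any(issubclass(w.category, DeprecationWarning) for w in caught)
True
>>> fourier_shift is ndimage.fourier_shift  # same object as the public name
True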
scipy
scipy-main/scipy/ndimage/_ni_support.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from collections.abc import Iterable import operator import warnings import numpy def _extend_mode_to_code(mode): """Convert an extension mode to the corresponding integer code. """ if mode == 'nearest': return 0 elif mode == 'wrap': return 1 elif mode in ['reflect', 'grid-mirror']: return 2 elif mode == 'mirror': return 3 elif mode == 'constant': return 4 elif mode == 'grid-wrap': return 5 elif mode == 'grid-constant': return 6 else: raise RuntimeError('boundary mode not supported') def _normalize_sequence(input, rank): """If input is a scalar, create a sequence of length equal to the rank by duplicating the input. If input is a sequence, check if its length is equal to the length of array. 
""" is_str = isinstance(input, str) if not is_str and isinstance(input, Iterable): normalized = list(input) if len(normalized) != rank: err = "sequence argument must have length equal to input rank" raise RuntimeError(err) else: normalized = [input] * rank return normalized def _get_output(output, input, shape=None, complex_output=False): if shape is None: shape = input.shape if output is None: if not complex_output: output = numpy.zeros(shape, dtype=input.dtype.name) else: complex_type = numpy.promote_types(input.dtype, numpy.complex64) output = numpy.zeros(shape, dtype=complex_type) elif isinstance(output, (type, numpy.dtype)): # Classes (like `np.float32`) and dtypes are interpreted as dtype if complex_output and numpy.dtype(output).kind != 'c': warnings.warn("promoting specified output dtype to complex") output = numpy.promote_types(output, numpy.complex64) output = numpy.zeros(shape, dtype=output) elif isinstance(output, str): # testsuite only appears to cover # f->np.float32 here f_dict = {"f": numpy.float32, "d": numpy.float64, "F": numpy.complex64, "D": numpy.complex128} output = f_dict[output] if complex_output and numpy.dtype(output).kind != 'c': raise RuntimeError("output must have complex dtype") output = numpy.zeros(shape, dtype=output) elif output.shape != shape: raise RuntimeError("output shape not correct") elif complex_output and output.dtype.kind != 'c': raise RuntimeError("output must have complex dtype") return output def _check_axes(axes, ndim): if axes is None: return tuple(range(ndim)) elif numpy.isscalar(axes): axes = (operator.index(axes),) elif isinstance(axes, Iterable): for ax in axes: axes = tuple(operator.index(ax) for ax in axes) if ax < -ndim or ax > ndim - 1: raise ValueError(f"specified axis: {ax} is out of range") axes = tuple(ax % ndim if ax < 0 else ax for ax in axes) else: message = "axes must be an integer, iterable of integers, or None" raise ValueError(message) if len(tuple(set(axes))) != len(axes): raise ValueError("axes must be unique") return axes
4,747
37.290323
76
py
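These are private helpers, but the rule they implement explains why scalar arguments such as `size=3` or `mode='reflect'` are accepted wherever a per-axis sequence is: a scalar is simply repeated once per axis. An illustrative sketch against the internal API shown above (internal names may change between releases):

>>> from scipy.ndimage import _ni_support
>>> _ni_support._normalize_sequence(3, 2)         # scalar repeated to the rank
[3, 3]
>>> _ni_support._normalize_sequence([1, 2], 2)    # matching-length sequence passes through
[1, 2]
>>> _ni_support._extend_mode_to_code('reflect')   # boundary modes map to integer codes
2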
scipy
scipy-main/scipy/ndimage/interpolation.py
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.ndimage` namespace for importing the functions
# included below.

import warnings
from . import _interpolation


__all__ = [  # noqa: F822
    'spline_filter1d', 'spline_filter', 'geometric_transform',
    'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate',
    'docfiller'
]


def __dir__():
    return __all__


def __getattr__(name):
    if name not in __all__:
        raise AttributeError(
            "scipy.ndimage.interpolation is deprecated and has no attribute "
            f"{name}. Try looking in scipy.ndimage instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, "
                  "the `scipy.ndimage.interpolation` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_interpolation, name)
909
27.4375
79
py
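As with the Fourier shim, the names listed here should be imported from the public `scipy.ndimage` namespace. A minimal sketch of the recommended usage, with a hypothetical 3x3 input:

>>> import numpy as np
>>> from scipy import ndimage
>>> a = np.arange(9.).reshape(3, 3)
>>> ndimage.shift(a, (1, 0), order=0)  # shift rows down by one, constant padding
array([[0., 0., 0.],
       [0., 1., 2.],
       [3., 4., 5.]])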
scipy
scipy-main/scipy/ndimage/_filters.py
# Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from collections.abc import Iterable import numbers import warnings import numpy import operator from scipy._lib._util import normalize_axis_index from . import _ni_support from . import _nd_image from . import _ni_docstrings __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter'] def _invalid_origin(origin, lenw): return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) def _complex_via_real_components(func, input, weights, output, cval, **kwargs): """Complex convolution via a linear combination of real convolutions.""" complex_input = input.dtype.kind == 'c' complex_weights = weights.dtype.kind == 'c' if complex_input and complex_weights: # real component of the output func(input.real, weights.real, output=output.real, cval=numpy.real(cval), **kwargs) output.real -= func(input.imag, weights.imag, output=None, cval=numpy.imag(cval), **kwargs) # imaginary component of the output func(input.real, weights.imag, output=output.imag, cval=numpy.real(cval), **kwargs) output.imag += func(input.imag, weights.real, output=None, cval=numpy.imag(cval), **kwargs) elif complex_input: func(input.real, weights, output=output.real, cval=numpy.real(cval), **kwargs) func(input.imag, weights, output=output.imag, cval=numpy.imag(cval), **kwargs) else: if numpy.iscomplexobj(cval): raise ValueError("Cannot provide a complex-valued cval when the " "input is real.") func(input, weights.real, output=output.real, cval=cval, **kwargs) func(input, weights.imag, output=output.imag, cval=cval, **kwargs) return output @_ni_docstrings.docfiller def correlate1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D correlation along the given axis. 
The lines of the array along the given axis are correlated with the given weights. Parameters ---------- %(input)s weights : array 1-D sequence of numbers. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- result : ndarray Correlation result. Has the same shape as `input`. Examples -------- >>> from scipy.ndimage import correlate1d >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([ 8, 26, 8, 12, 7, 28, 36, 9]) """ input = numpy.asarray(input) weights = numpy.asarray(weights) complex_input = input.dtype.kind == 'c' complex_weights = weights.dtype.kind == 'c' if complex_input or complex_weights: if complex_weights: weights = weights.conj() weights = weights.astype(numpy.complex128, copy=False) kwargs = dict(axis=axis, mode=mode, origin=origin) output = _ni_support._get_output(output, input, complex_output=True) return _complex_via_real_components(correlate1d, input, weights, output, cval, **kwargs) output = _ni_support._get_output(output, input) weights = numpy.asarray(weights, dtype=numpy.float64) if weights.ndim != 1 or weights.shape[0] < 1: raise RuntimeError('no filter weights given') if not weights.flags.contiguous: weights = weights.copy() axis = normalize_axis_index(axis, input.ndim) if _invalid_origin(origin, len(weights)): raise ValueError('Invalid origin; origin must satisfy ' '-(len(weights) // 2) <= origin <= ' '(len(weights)-1) // 2') mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate1d(input, weights, axis, output, mode, cval, origin) return output @_ni_docstrings.docfiller def convolve1d(input, weights, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D convolution along the given axis. The lines of the array along the given axis are convolved with the given weights. Parameters ---------- %(input)s weights : ndarray 1-D sequence of numbers. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- convolve1d : ndarray Convolved array with same shape as input Examples -------- >>> from scipy.ndimage import convolve1d >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) array([14, 24, 4, 13, 12, 36, 27, 0]) """ weights = weights[::-1] origin = -origin if not len(weights) & 1: origin -= 1 weights = numpy.asarray(weights) if weights.dtype.kind == 'c': # pre-conjugate here to counteract the conjugation in correlate1d weights = weights.conj() return correlate1d(input, weights, axis, output, mode, cval, origin) def _gaussian_kernel1d(sigma, order, radius): """ Computes a 1-D Gaussian convolution kernel. """ if order < 0: raise ValueError('order must be non-negative') exponent_range = numpy.arange(order + 1) sigma2 = sigma * sigma x = numpy.arange(-radius, radius+1) phi_x = numpy.exp(-0.5 / sigma2 * x ** 2) phi_x = phi_x / phi_x.sum() if order == 0: return phi_x else: # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) # p'(x) = -1 / sigma ** 2 # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the # coefficients of q(x) q = numpy.zeros(order + 1) q[0] = 1 D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x) P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x) Q_deriv = D + P for _ in range(order): q = Q_deriv.dot(q) q = (x[:, None] ** exponent_range).dot(q) return q * phi_x @_ni_docstrings.docfiller def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0, *, radius=None): """1-D Gaussian filter. 
Parameters ---------- %(input)s sigma : scalar standard deviation for Gaussian kernel %(axis)s order : int, optional An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode_reflect)s %(cval)s truncate : float, optional Truncate the filter at this many standard deviations. Default is 4.0. radius : None or int, optional Radius of the Gaussian kernel. If specified, the size of the kernel will be ``2*radius + 1``, and `truncate` is ignored. Default is None. Returns ------- gaussian_filter1d : ndarray Notes ----- The Gaussian kernel will have size ``2*radius + 1`` along each axis. If `radius` is None, a default ``radius = round(truncate * sigma)`` will be used. Examples -------- >>> from scipy.ndimage import gaussian_filter1d >>> import numpy as np >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() >>> x = rng.standard_normal(101).cumsum() >>> y3 = gaussian_filter1d(x, 3) >>> y6 = gaussian_filter1d(x, 6) >>> plt.plot(x, 'k', label='original data') >>> plt.plot(y3, '--', label='filtered, sigma=3') >>> plt.plot(y6, ':', label='filtered, sigma=6') >>> plt.legend() >>> plt.grid() >>> plt.show() """ sd = float(sigma) # make the radius of the filter equal to truncate standard deviations lw = int(truncate * sd + 0.5) if radius is not None: lw = radius if not isinstance(lw, numbers.Integral) or lw < 0: raise ValueError('Radius must be a nonnegative integer.') # Since we are calling correlate, not convolve, revert the kernel weights = _gaussian_kernel1d(sigma, order, lw)[::-1] return correlate1d(input, weights, axis, output, mode, cval, 0) @_ni_docstrings.docfiller def gaussian_filter(input, sigma, order=0, output=None, mode="reflect", cval=0.0, truncate=4.0, *, radius=None, axes=None): """Multidimensional Gaussian filter. Parameters ---------- %(input)s sigma : scalar or sequence of scalars Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. order : int or sequence of ints, optional The order of the filter along each axis is given as a sequence of integers, or as a single number. An order of 0 corresponds to convolution with a Gaussian kernel. A positive order corresponds to convolution with that derivative of a Gaussian. %(output)s %(mode_multiple)s %(cval)s truncate : float, optional Truncate the filter at this many standard deviations. Default is 4.0. radius : None or int or sequence of ints, optional Radius of the Gaussian kernel. The radius are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. If specified, the size of the kernel along each axis will be ``2*radius + 1``, and `truncate` is ignored. Default is None. axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. When `axes` is specified, any tuples used for `sigma`, `order`, `mode` and/or `radius` must match the length of `axes`. The ith entry in any of these tuples corresponds to the ith entry in `axes`. Returns ------- gaussian_filter : ndarray Returned array of same shape as `input`. 
Notes ----- The multidimensional filter is implemented as a sequence of 1-D convolution filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. The Gaussian kernel will have size ``2*radius + 1`` along each axis. If `radius` is None, the default ``radius = round(truncate * sigma)`` will be used. Examples -------- >>> from scipy.ndimage import gaussian_filter >>> import numpy as np >>> a = np.arange(50, step=2).reshape((5,5)) >>> a array([[ 0, 2, 4, 6, 8], [10, 12, 14, 16, 18], [20, 22, 24, 26, 28], [30, 32, 34, 36, 38], [40, 42, 44, 46, 48]]) >>> gaussian_filter(a, sigma=1) array([[ 4, 6, 8, 9, 11], [10, 12, 14, 15, 17], [20, 22, 24, 25, 27], [29, 31, 33, 34, 36], [35, 37, 39, 40, 42]]) >>> from scipy import datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = gaussian_filter(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = _ni_support._check_axes(axes, input.ndim) num_axes = len(axes) orders = _ni_support._normalize_sequence(order, num_axes) sigmas = _ni_support._normalize_sequence(sigma, num_axes) modes = _ni_support._normalize_sequence(mode, num_axes) radiuses = _ni_support._normalize_sequence(radius, num_axes) axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii]) for ii in range(num_axes) if sigmas[ii] > 1e-15] if len(axes) > 0: for axis, sigma, order, mode, radius in axes: gaussian_filter1d(input, sigma, axis, order, output, mode, cval, truncate, radius=radius) input = output else: output[...] = input[...] return output @_ni_docstrings.docfiller def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Prewitt filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Returns ------- prewitt : ndarray Filtered array. Has the same shape as `input`. See Also -------- sobel: Sobel filter Notes ----- This function computes the one-dimensional Prewitt filter. Horizontal edges are emphasised with the horizontal transform (axis=0), vertical edges with the vertical transform (axis=1), and so on for higher dimensions. These can be combined to give the magnitude. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> import numpy as np >>> ascent = datasets.ascent() >>> prewitt_h = ndimage.prewitt(ascent, axis=0) >>> prewitt_v = ndimage.prewitt(ascent, axis=1) >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2) >>> magnitude *= 255 / np.max(magnitude) # Normalization >>> fig, axes = plt.subplots(2, 2, figsize = (8, 8)) >>> plt.gray() >>> axes[0, 0].imshow(ascent) >>> axes[0, 1].imshow(prewitt_h) >>> axes[1, 0].imshow(prewitt_v) >>> axes[1, 1].imshow(magnitude) >>> titles = ["original", "horizontal", "vertical", "magnitude"] >>> for i, ax in enumerate(axes.ravel()): ... ax.set_title(titles[i]) ... 
ax.axis("off") >>> plt.show() """ input = numpy.asarray(input) axis = normalize_axis_index(axis, input.ndim) output = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) return output @_ni_docstrings.docfiller def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): """Calculate a Sobel filter. Parameters ---------- %(input)s %(axis)s %(output)s %(mode_multiple)s %(cval)s Returns ------- sobel : ndarray Filtered array. Has the same shape as `input`. Notes ----- This function computes the axis-specific Sobel gradient. The horizontal edges can be emphasised with the horizontal trasform (axis=0), the vertical edges with the vertical transform (axis=1) and so on for higher dimensions. These can be combined to give the magnitude. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> import numpy as np >>> ascent = datasets.ascent().astype('int32') >>> sobel_h = ndimage.sobel(ascent, 0) # horizontal gradient >>> sobel_v = ndimage.sobel(ascent, 1) # vertical gradient >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2) >>> magnitude *= 255.0 / np.max(magnitude) # normalization >>> fig, axs = plt.subplots(2, 2, figsize=(8, 8)) >>> plt.gray() # show the filtered result in grayscale >>> axs[0, 0].imshow(ascent) >>> axs[0, 1].imshow(sobel_h) >>> axs[1, 0].imshow(sobel_v) >>> axs[1, 1].imshow(magnitude) >>> titles = ["original", "horizontal", "vertical", "magnitude"] >>> for i, ax in enumerate(axs.ravel()): ... ax.set_title(titles[i]) ... ax.axis("off") >>> plt.show() """ input = numpy.asarray(input) axis = normalize_axis_index(axis, input.ndim) output = _ni_support._get_output(output, input) modes = _ni_support._normalize_sequence(mode, input.ndim) correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) axes = [ii for ii in range(input.ndim) if ii != axis] for ii in axes: correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) return output @_ni_docstrings.docfiller def generic_laplace(input, derivative2, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """ N-D Laplace filter using a provided second derivative function. Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s Returns ------- generic_laplace : ndarray Filtered array. Has the same shape as `input`. """ if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative2(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) for ii in range(1, len(axes)): tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) output += tmp else: output[...] = input[...] return output @_ni_docstrings.docfiller def laplace(input, output=None, mode="reflect", cval=0.0): """N-D Laplace filter based on approximate second derivatives. 
Parameters ---------- %(input)s %(output)s %(mode_multiple)s %(cval)s Returns ------- laplace : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.laplace(ascent) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ def derivative2(input, axis, output, mode, cval): return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) return generic_laplace(input, derivative2, output, mode, cval) @_ni_docstrings.docfiller def gaussian_laplace(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional Laplace filter using Gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_laplace : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> ascent = datasets.ascent() >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> result = ndimage.gaussian_laplace(ascent, sigma=1) >>> ax1.imshow(result) >>> result = ndimage.gaussian_laplace(ascent, sigma=3) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative2(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_laplace(input, derivative2, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) @_ni_docstrings.docfiller def generic_gradient_magnitude(input, derivative, output=None, mode="reflect", cval=0.0, extra_arguments=(), extra_keywords=None): """Gradient magnitude using a provided gradient function. Parameters ---------- %(input)s derivative : callable Callable with the following signature:: derivative(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See `extra_arguments`, `extra_keywords` below. `derivative` can assume that `input` and `output` are ndarrays. Note that the output from `derivative` is modified inplace; be careful to copy important inputs before returning them. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s Returns ------- generic_gradient_matnitude : ndarray Filtered array. Has the same shape as `input`. 
""" if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) output = _ni_support._get_output(output, input) axes = list(range(input.ndim)) if len(axes) > 0: modes = _ni_support._normalize_sequence(mode, len(axes)) derivative(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords) numpy.multiply(output, output, output) for ii in range(1, len(axes)): tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords) numpy.multiply(tmp, tmp, tmp) output += tmp # This allows the sqrt to work with a different default casting numpy.sqrt(output, output, casting='unsafe') else: output[...] = input[...] return output @_ni_docstrings.docfiller def gaussian_gradient_magnitude(input, sigma, output=None, mode="reflect", cval=0.0, **kwargs): """Multidimensional gradient magnitude using Gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_gradient_magnitude : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs) def _correlate_or_convolve(input, weights, output, mode, cval, origin, convolution): input = numpy.asarray(input) weights = numpy.asarray(weights) complex_input = input.dtype.kind == 'c' complex_weights = weights.dtype.kind == 'c' if complex_input or complex_weights: if complex_weights and not convolution: # As for numpy.correlate, conjugate weights rather than input. 
weights = weights.conj() kwargs = dict( mode=mode, origin=origin, convolution=convolution ) output = _ni_support._get_output(output, input, complex_output=True) return _complex_via_real_components(_correlate_or_convolve, input, weights, output, cval, **kwargs) origins = _ni_support._normalize_sequence(origin, input.ndim) weights = numpy.asarray(weights, dtype=numpy.float64) wshape = [ii for ii in weights.shape if ii > 0] if len(wshape) != input.ndim: raise RuntimeError('filter weights array has incorrect shape.') if convolution: weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] for ii in range(len(origins)): origins[ii] = -origins[ii] if not weights.shape[ii] & 1: origins[ii] -= 1 for origin, lenw in zip(origins, wshape): if _invalid_origin(origin, lenw): raise ValueError('Invalid origin; origin must satisfy ' '-(weights.shape[k] // 2) <= origin[k] <= ' '(weights.shape[k]-1) // 2') if not weights.flags.contiguous: weights = weights.copy() output = _ni_support._get_output(output, input) temp_needed = numpy.may_share_memory(input, output) if temp_needed: # input and output arrays cannot share memory temp = output output = _ni_support._get_output(output.dtype, input) if not isinstance(mode, str) and isinstance(mode, Iterable): raise RuntimeError("A sequence of modes is not supported") mode = _ni_support._extend_mode_to_code(mode) _nd_image.correlate(input, weights, output, mode, cval, origins) if temp_needed: temp[...] = output output = temp return output @_ni_docstrings.docfiller def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multidimensional correlation. The array is correlated with the given kernel. Parameters ---------- %(input)s weights : ndarray array of weights, same number of dimensions as input %(output)s %(mode_reflect)s %(cval)s %(origin_multiple)s Returns ------- result : ndarray The result of correlation of `input` with `weights`. See Also -------- convolve : Convolve an image with a kernel. Examples -------- Correlation is the process of moving a filter mask often referred to as kernel over the image and computing the sum of products at each location. >>> from scipy.ndimage import correlate >>> import numpy as np >>> input_img = np.arange(25).reshape(5,5) >>> print(input_img) [[ 0 1 2 3 4] [ 5 6 7 8 9] [10 11 12 13 14] [15 16 17 18 19] [20 21 22 23 24]] Define a kernel (weights) for correlation. In this example, it is for sum of center and up, down, left and right next elements. >>> weights = [[0, 1, 0], ... [1, 1, 1], ... [0, 1, 0]] We can calculate a correlation result: For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``. >>> correlate(input_img, weights) array([[ 6, 10, 15, 20, 24], [ 26, 30, 35, 40, 44], [ 51, 55, 60, 65, 69], [ 76, 80, 85, 90, 94], [ 96, 100, 105, 110, 114]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, False) @_ni_docstrings.docfiller def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0): """ Multidimensional convolution. The array is convolved with the given kernel. Parameters ---------- %(input)s weights : array_like Array of weights, same number of dimensions as input %(output)s %(mode_reflect)s cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0 origin : int, optional Controls the origin of the input signal, which is where the filter is centered to produce the first element of the output. Positive values shift the filter to the right, and negative values shift the filter to the left. 
Default is 0. Returns ------- result : ndarray The result of convolution of `input` with `weights`. See Also -------- correlate : Correlate an image with a kernel. Notes ----- Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where W is the `weights` kernel, j is the N-D spatial index over :math:`W`, I is the `input` and k is the coordinate of the center of W, specified by `origin` in the input parameters. Examples -------- Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, because in this case borders (i.e., where the `weights` kernel, centered on any one value, extends beyond an edge of `input`) are treated as zeros. >>> import numpy as np >>> a = np.array([[1, 2, 0, 0], ... [5, 3, 0, 4], ... [0, 0, 0, 7], ... [9, 3, 0, 0]]) >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) >>> from scipy import ndimage >>> ndimage.convolve(a, k, mode='constant', cval=0.0) array([[11, 10, 7, 4], [10, 3, 11, 11], [15, 12, 14, 7], [12, 3, 7, 0]]) Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` with 1.0's (and then extracting only the original region of the result). >>> ndimage.convolve(a, k, mode='constant', cval=1.0) array([[13, 11, 8, 7], [11, 3, 11, 14], [16, 12, 14, 10], [15, 6, 10, 5]]) With ``mode='reflect'`` (the default), outer values are reflected at the edge of `input` to fill in missing values. >>> b = np.array([[2, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]]) >>> ndimage.convolve(b, k, mode='reflect') array([[5, 0, 0], [3, 0, 0], [1, 0, 0]]) This includes diagonally at the corners. >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) >>> ndimage.convolve(b, k) array([[4, 2, 0], [3, 2, 0], [1, 1, 0]]) With ``mode='nearest'``, the single nearest value in to an edge in `input` is repeated as many times as needed to match the overlapping `weights`. >>> c = np.array([[2, 0, 1], ... [1, 0, 0], ... [0, 0, 0]]) >>> k = np.array([[0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0], ... [0, 1, 0]]) >>> ndimage.convolve(c, k, mode='nearest') array([[7, 0, 3], [5, 0, 2], [3, 0, 1]]) """ return _correlate_or_convolve(input, weights, output, mode, cval, origin, True) @_ni_docstrings.docfiller def uniform_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D uniform filter along the given axis. The lines of the array along the given axis are filtered with a uniform filter of given size. Parameters ---------- %(input)s size : int length of uniform filter %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- result : ndarray Filtered array. Has same shape as `input`. 
Examples -------- >>> from scipy.ndimage import uniform_filter1d >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([4, 3, 4, 1, 4, 6, 6, 3]) """ input = numpy.asarray(input) axis = normalize_axis_index(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') complex_output = input.dtype.kind == 'c' output = _ni_support._get_output(output, input, complex_output=complex_output) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) if not complex_output: _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, origin) else: _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode, numpy.real(cval), origin) _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode, numpy.imag(cval), origin) return output @_ni_docstrings.docfiller def uniform_filter(input, size=3, output=None, mode="reflect", cval=0.0, origin=0, *, axes=None): """Multidimensional uniform filter. Parameters ---------- %(input)s size : int or sequence of ints, optional The sizes of the uniform filter are given for each axis as a sequence, or as a single number, in which case the size is equal for all axes. %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. When `axes` is specified, any tuples used for `size`, `origin`, and/or `mode` must match the length of `axes`. The ith entry in any of these tuples corresponds to the ith entry in `axes`. Returns ------- uniform_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- The multidimensional filter is implemented as a sequence of 1-D uniform filters. The intermediate arrays are stored in the same data type as the output. Therefore, for output types with a limited precision, the results may be imprecise because intermediate results may be stored with insufficient precision. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.uniform_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ input = numpy.asarray(input) output = _ni_support._get_output(output, input, complex_output=input.dtype.kind == 'c') axes = _ni_support._check_axes(axes, input.ndim) num_axes = len(axes) sizes = _ni_support._normalize_sequence(size, num_axes) origins = _ni_support._normalize_sequence(origin, num_axes) modes = _ni_support._normalize_sequence(mode, num_axes) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(num_axes) if sizes[ii] > 1] if len(axes) > 0: for axis, size, origin, mode in axes: uniform_filter1d(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] return output @_ni_docstrings.docfiller def minimum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- result : ndarray. Filtered image. 
Has the same shape as `input`. Notes ----- This function implements the MINLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import minimum_filter1d >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([2, 0, 0, 0, 1, 1, 0, 0]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = normalize_axis_index(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1) return output @_ni_docstrings.docfiller def maximum_filter1d(input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0): """Calculate a 1-D maximum filter along the given axis. The lines of the array along the given axis are filtered with a maximum filter of given size. Parameters ---------- %(input)s size : int Length along which to calculate the 1-D maximum. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- maximum1d : ndarray, None Maximum-filtered array with same shape as input. None if `output` is not None Notes ----- This function implements the MAXLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being the `input` length, regardless of filter size. References ---------- .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 .. 
[2] http://www.richardhartersworld.com/cri/2001/slidingmin.html Examples -------- >>> from scipy.ndimage import maximum_filter1d >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([8, 8, 8, 4, 9, 9, 9, 9]) """ input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axis = normalize_axis_index(axis, input.ndim) if size < 1: raise RuntimeError('incorrect filter size') output = _ni_support._get_output(output, input) if (size // 2 + origin < 0) or (size // 2 + origin >= size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 0) return output def _min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, minimum, axes=None): if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) if structure is None: if footprint is None: if size is None: raise RuntimeError("no footprint provided") separable = True else: footprint = numpy.asarray(footprint, dtype=bool) if not footprint.any(): raise ValueError("All-zero footprint is not supported.") if footprint.all(): size = footprint.shape footprint = None separable = True else: separable = False else: structure = numpy.asarray(structure, dtype=numpy.float64) separable = False if footprint is None: footprint = numpy.ones(structure.shape, bool) else: footprint = numpy.asarray(footprint, dtype=bool) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) temp_needed = numpy.may_share_memory(input, output) if temp_needed: # input and output arrays cannot share memory temp = output output = _ni_support._get_output(output.dtype, input) axes = _ni_support._check_axes(axes, input.ndim) num_axes = len(axes) if separable: origins = _ni_support._normalize_sequence(origin, num_axes) sizes = _ni_support._normalize_sequence(size, num_axes) modes = _ni_support._normalize_sequence(mode, num_axes) axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: filter_ = minimum_filter1d else: filter_ = maximum_filter1d if len(axes) > 0: for axis, size, origin, mode in axes: filter_(input, int(size), axis, output, mode, cval, origin) input = output else: output[...] = input[...] 
else: origins = _ni_support._normalize_sequence(origin, input.ndim) if num_axes < input.ndim: if footprint.ndim != num_axes: raise RuntimeError("footprint array has incorrect shape") footprint = numpy.expand_dims( footprint, tuple(ax for ax in range(input.ndim) if ax not in axes) ) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() if structure is not None: if len(structure.shape) != input.ndim: raise RuntimeError('structure array has incorrect shape') if num_axes != structure.ndim: structure = numpy.expand_dims( structure, tuple(ax for ax in range(structure.ndim) if ax not in axes) ) if not structure.flags.contiguous: structure = structure.copy() if not isinstance(mode, str) and isinstance(mode, Iterable): raise RuntimeError( "A sequence of modes is not supported for non-separable " "footprints") mode = _ni_support._extend_mode_to_code(mode) _nd_image.min_or_max_filter(input, footprint, structure, output, mode, cval, origins, minimum) if temp_needed: temp[...] = output output = temp return output @_ni_docstrings.docfiller def minimum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, *, axes=None): """Calculate a multidimensional minimum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. When `axes` is specified, any tuples used for `size`, `origin`, and/or `mode` must match the length of `axes`. The ith entry in any of these tuples corresponds to the ith entry in `axes`. Returns ------- minimum_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- A sequence of modes (one per axis) is only supported when the footprint is separable. Otherwise, a single mode string must be provided. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.minimum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 1, axes) @_ni_docstrings.docfiller def maximum_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, *, axes=None): """Calculate a multidimensional maximum filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_multiple)s %(cval)s %(origin_multiple)s axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. When `axes` is specified, any tuples used for `size`, `origin`, and/or `mode` must match the length of `axes`. The ith entry in any of these tuples corresponds to the ith entry in `axes`. Returns ------- maximum_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- A sequence of modes (one per axis) is only supported when the footprint is separable. Otherwise, a single mode string must be provided. 
Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.maximum_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _min_or_max_filter(input, size, footprint, None, output, mode, cval, origin, 0, axes) @_ni_docstrings.docfiller def _rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, operation='rank', axes=None): if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3) input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') axes = _ni_support._check_axes(axes, input.ndim) num_axes = len(axes) origins = _ni_support._normalize_sequence(origin, num_axes) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, num_axes) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) if num_axes < input.ndim: # set origin = 0 for any axes not being filtered origins_temp = [0,] * input.ndim for o, ax in zip(origins, axes): origins_temp[ax] = o origins = origins_temp if not isinstance(mode, str) and isinstance(mode, Iterable): # set mode = 'constant' for any axes not being filtered modes = _ni_support._normalize_sequence(mode, num_axes) modes_temp = ['constant'] * input.ndim for m, ax in zip(modes, axes): modes_temp[ax] = m mode = modes_temp # insert singleton dimension along any non-filtered axes if footprint.ndim != num_axes: raise RuntimeError("footprint array has incorrect shape") footprint = numpy.expand_dims( footprint, tuple(ax for ax in range(input.ndim) if ax not in axes) ) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() filter_size = numpy.where(footprint, 1, 0).sum() if operation == 'median': rank = filter_size // 2 elif operation == 'percentile': percentile = rank if percentile < 0.0: percentile += 100.0 if percentile < 0 or percentile > 100: raise RuntimeError('invalid percentile') if percentile == 100.0: rank = filter_size - 1 else: rank = int(float(filter_size) * percentile / 100.0) if rank < 0: rank += filter_size if rank < 0 or rank >= filter_size: raise RuntimeError('rank not within filter footprint size') if rank == 0: return minimum_filter(input, None, footprint, output, mode, cval, origins, axes=None) elif rank == filter_size - 1: return maximum_filter(input, None, footprint, output, mode, cval, origins, axes=None) else: output = _ni_support._get_output(output, input) temp_needed = numpy.may_share_memory(input, output) if temp_needed: # input and output arrays cannot share memory temp = output output = _ni_support._get_output(output.dtype, input) if not isinstance(mode, str) and isinstance(mode, Iterable): raise RuntimeError( "A sequence of modes is not supported by non-separable rank " "filters") mode = _ni_support._extend_mode_to_code(mode) _nd_image.rank_filter(input, rank, footprint, output, mode, 
cval, origins) if temp_needed: temp[...] = output output = temp return output @_ni_docstrings.docfiller def rank_filter(input, rank, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, *, axes=None): """Calculate a multidimensional rank filter. Parameters ---------- %(input)s rank : int The rank parameter may be less than zero, i.e., rank = -1 indicates the largest element. %(size_foot)s %(output)s %(mode_reflect)s %(cval)s %(origin_multiple)s axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. Returns ------- rank_filter : ndarray Filtered array. Has the same shape as `input`. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.rank_filter(ascent, rank=42, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ rank = operator.index(rank) return _rank_filter(input, rank, size, footprint, output, mode, cval, origin, 'rank', axes=axes) @_ni_docstrings.docfiller def median_filter(input, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, *, axes=None): """ Calculate a multidimensional median filter. Parameters ---------- %(input)s %(size_foot)s %(output)s %(mode_reflect)s %(cval)s %(origin_multiple)s axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. Returns ------- median_filter : ndarray Filtered array. Has the same shape as `input`. See Also -------- scipy.signal.medfilt2d Notes ----- For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes the specialised function `scipy.signal.medfilt2d` may be faster. It is however limited to constant mode with ``cval=0``. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.median_filter(ascent, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, 0, size, footprint, output, mode, cval, origin, 'median', axes=axes) @_ni_docstrings.docfiller def percentile_filter(input, percentile, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, *, axes=None): """Calculate a multidimensional percentile filter. Parameters ---------- %(input)s percentile : scalar The percentile parameter may be less than zero, i.e., percentile = -20 equals percentile = 80 %(size_foot)s %(output)s %(mode_reflect)s %(cval)s %(origin_multiple)s axes : tuple of int or None, optional If None, `input` is filtered along all axes. Otherwise, `input` is filtered along the specified axes. Returns ------- percentile_filter : ndarray Filtered array. Has the same shape as `input`. 
Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show() """ return _rank_filter(input, percentile, size, footprint, output, mode, cval, origin, 'percentile', axes=axes) @_ni_docstrings.docfiller def generic_filter1d(input, function, filter_size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a 1-D filter along the given axis. `generic_filter1d` iterates over the lines of the array, calling the given function at each line. The arguments of the line are the input line, and the output line. The input and output lines are 1-D double arrays. The input line is extended appropriately according to the filter size and origin. The output line must be modified in-place with the result. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply along given axis. filter_size : scalar Length of the filter. %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s %(extra_arguments)s %(extra_keywords)s Returns ------- generic_filter1d : ndarray Filtered array. Has the same shape as `input`. Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int function(double *input_line, npy_intp input_length, double *output_line, npy_intp output_length, void *user_data) int function(double *input_line, intptr_t input_length, double *output_line, intptr_t output_length, void *user_data) The calling function iterates over the lines of the input and output arrays, calling the callback function at each line. The current line is extended according to the border conditions set by the calling function, and the result is copied into the array that is passed through ``input_line``. The length of the input line (after extension) is passed through ``input_length``. The callback function should apply the filter and store the result in the array passed through ``output_line``. The length of the output line is passed through ``output_length``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. 
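Examples
--------
A plain Python callable can be used as well; it receives the extended input
line and must write the result into the output line in place. As a rough
sketch (with ``filter_size=3`` and the default ``origin=0``, the input line
is one sample longer at each end), a running sum of three elements:

>>> import numpy as np
>>> from scipy.ndimage import generic_filter1d
>>> def running_sum(input_line, output_line):
...     output_line[...] = input_line[:-2] + input_line[1:-1] + input_line[2:]
>>> generic_filter1d(np.array([2, 8, 0, 4, 1, 9, 9, 0]), running_sum, 3)
array([12, 10, 12,  5, 14, 19, 18,  9])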
""" if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') output = _ni_support._get_output(output, input) if filter_size < 1: raise RuntimeError('invalid filter size') axis = normalize_axis_index(axis, input.ndim) if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= filter_size): raise ValueError('invalid origin') mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter1d(input, function, filter_size, axis, output, mode, cval, origin, extra_arguments, extra_keywords) return output @_ni_docstrings.docfiller def generic_filter(input, function, size=None, footprint=None, output=None, mode="reflect", cval=0.0, origin=0, extra_arguments=(), extra_keywords=None): """Calculate a multidimensional filter using the given function. At each element the provided function is called. The input values within the filter footprint at that element are passed to the function as a 1-D array of double values. Parameters ---------- %(input)s function : {callable, scipy.LowLevelCallable} Function to apply at each element. %(size_foot)s %(output)s %(mode_reflect)s %(cval)s %(origin_multiple)s %(extra_arguments)s %(extra_keywords)s Returns ------- generic_filter : ndarray Filtered array. Has the same shape as `input`. Notes ----- This function also accepts low-level callback functions with one of the following signatures and wrapped in `scipy.LowLevelCallable`: .. code:: c int callback(double *buffer, npy_intp filter_size, double *return_value, void *user_data) int callback(double *buffer, intptr_t filter_size, double *return_value, void *user_data) The calling function iterates over the elements of the input and output arrays, calling the callback function at each element. The elements within the footprint of the filter at the current element are passed through the ``buffer`` parameter, and the number of elements within the footprint through ``filter_size``. The calculated value is returned in ``return_value``. ``user_data`` is the data pointer provided to `scipy.LowLevelCallable` as-is. The callback function must return an integer error status that is zero if something went wrong and one otherwise. If an error occurs, you should normally set the python error status with an informative message before returning, otherwise a default error message is set by the calling function. In addition, some other low-level function pointer specifications are accepted, but these are for backward compatibility only and should not be used in new code. Examples -------- Import the necessary modules and load the example image used for filtering. >>> import numpy as np >>> from scipy import datasets >>> from scipy.ndimage import generic_filter >>> import matplotlib.pyplot as plt >>> ascent = datasets.ascent() Compute a maximum filter with kernel size 10 by passing a simple NumPy aggregation function as argument to `function`. >>> maximum_filter_result = generic_filter(ascent, np.amax, [10, 10]) While a maximmum filter could also directly be obtained using `maximum_filter`, `generic_filter` allows generic Python function or `scipy.LowLevelCallable` to be used as a filter. Here, we compute the range between maximum and minimum value as an example for a kernel size of 5. >>> def custom_filter(image): ... return np.amax(image) - np.amin(image) >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5]) Plot the original and filtered images. 
>>> fig, axes = plt.subplots(3, 1, figsize=(4, 12)) >>> plt.gray() # show the filtered result in grayscale >>> top, middle, bottom = axes >>> for ax in axes: ... ax.set_axis_off() # remove coordinate system >>> top.imshow(ascent) >>> top.set_title("Original image") >>> middle.imshow(maximum_filter_result) >>> middle.set_title("Maximum filter, Kernel: 10x10") >>> bottom.imshow(custom_filter_result) >>> bottom.set_title("Custom filter, Kernel: 5x5") >>> fig.tight_layout() """ if (size is not None) and (footprint is not None): warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2) if extra_keywords is None: extra_keywords = {} input = numpy.asarray(input) if numpy.iscomplexobj(input): raise TypeError('Complex type not supported') origins = _ni_support._normalize_sequence(origin, input.ndim) if footprint is None: if size is None: raise RuntimeError("no footprint or filter size provided") sizes = _ni_support._normalize_sequence(size, input.ndim) footprint = numpy.ones(sizes, dtype=bool) else: footprint = numpy.asarray(footprint, dtype=bool) fshape = [ii for ii in footprint.shape if ii > 0] if len(fshape) != input.ndim: raise RuntimeError('filter footprint array has incorrect shape.') for origin, lenf in zip(origins, fshape): if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): raise ValueError('invalid origin') if not footprint.flags.contiguous: footprint = footprint.copy() output = _ni_support._get_output(output, input) mode = _ni_support._extend_mode_to_code(mode) _nd_image.generic_filter(input, function, footprint, output, mode, cval, origins, extra_arguments, extra_keywords) return output
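# ---------------------------------------------------------------------------
# Illustrative sketch of the rank/median/percentile relationship implemented
# by `_rank_filter` above: `median_filter` uses the middle rank of the
# footprint (`filter_size // 2`) and `percentile_filter` maps a percentile p
# to `rank = int(filter_size * p / 100.0)`.  For a full 3x3 footprint
# (filter_size == 9) the three public filters should therefore agree:
import numpy as np
from scipy.ndimage import median_filter, percentile_filter, rank_filter

_img = np.random.default_rng(0).integers(0, 255, size=(16, 16))
_med = median_filter(_img, size=3)
assert np.array_equal(_med, rank_filter(_img, rank=4, size=3))    # 9 // 2 == 4
assert np.array_equal(_med, percentile_filter(_img, 50, size=3))  # int(9 * 50 / 100.0) == 4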
65,628
34.475135
90
py
scipy
scipy-main/scipy/ndimage/__init__.py
""" ========================================================= Multidimensional image processing (:mod:`scipy.ndimage`) ========================================================= .. currentmodule:: scipy.ndimage This package contains various functions for multidimensional image processing. Filters ======= .. autosummary:: :toctree: generated/ convolve - Multidimensional convolution convolve1d - 1-D convolution along the given axis correlate - Multidimensional correlation correlate1d - 1-D correlation along the given axis gaussian_filter gaussian_filter1d gaussian_gradient_magnitude gaussian_laplace generic_filter - Multidimensional filter using a given function generic_filter1d - 1-D generic filter along the given axis generic_gradient_magnitude generic_laplace laplace - N-D Laplace filter based on approximate second derivatives maximum_filter maximum_filter1d median_filter - Calculates a multidimensional median filter minimum_filter minimum_filter1d percentile_filter - Calculates a multidimensional percentile filter prewitt rank_filter - Calculates a multidimensional rank filter sobel uniform_filter - Multidimensional uniform filter uniform_filter1d - 1-D uniform filter along the given axis Fourier filters =============== .. autosummary:: :toctree: generated/ fourier_ellipsoid fourier_gaussian fourier_shift fourier_uniform Interpolation ============= .. autosummary:: :toctree: generated/ affine_transform - Apply an affine transformation geometric_transform - Apply an arbritrary geometric transform map_coordinates - Map input array to new coordinates by interpolation rotate - Rotate an array shift - Shift an array spline_filter spline_filter1d zoom - Zoom an array Measurements ============ .. autosummary:: :toctree: generated/ center_of_mass - The center of mass of the values of an array at labels extrema - Min's and max's of an array at labels, with their positions find_objects - Find objects in a labeled array histogram - Histogram of the values of an array, optionally at labels label - Label features in an array labeled_comprehension maximum maximum_position mean - Mean of the values of an array at labels median minimum minimum_position standard_deviation - Standard deviation of an N-D image array sum_labels - Sum of the values of the array value_indices - Find indices of each distinct value in given array variance - Variance of the values of an N-D image array watershed_ift Morphology ========== .. autosummary:: :toctree: generated/ binary_closing binary_dilation binary_erosion binary_fill_holes binary_hit_or_miss binary_opening binary_propagation black_tophat distance_transform_bf distance_transform_cdt distance_transform_edt generate_binary_structure grey_closing grey_dilation grey_erosion grey_opening iterate_structure morphological_gradient morphological_laplace white_tophat """ # Copyright (C) 2003-2005 Peter J. Verveer # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from ._filters import * # noqa: F401 F403 from ._fourier import * # noqa: F401 F403 from ._interpolation import * # noqa: F401 F403 from ._measurements import * # noqa: F401 F403 from ._morphology import * # noqa: F401 F403 # Deprecated namespaces, to be removed in v2.0.0 from . import filters # noqa: F401 from . import fourier # noqa: F401 from . import interpolation # noqa: F401 from . import measurements # noqa: F401 from . import morphology # noqa: F401 __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
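# Illustrative sketch of how the assembled namespace behaves: every public
# name re-exported from the private `_filters`, `_fourier`, `_interpolation`,
# `_measurements` and `_morphology` modules is importable from `scipy.ndimage`
# directly, and `__all__` contains no underscore-prefixed names.
import scipy.ndimage as ndi

assert "sobel" in ndi.__all__ and "median_filter" in ndi.__all__
assert not any(name.startswith("_") for name in ndi.__all__)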
5,155
29.329412
74
py
scipy
scipy-main/scipy/ndimage/morphology.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.ndimage` namespace for importing the functions # included below. import warnings from . import _morphology __all__ = [ # noqa: F822 'iterate_structure', 'generate_binary_structure', 'binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing', 'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes', 'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing', 'morphological_gradient', 'morphological_laplace', 'white_tophat', 'black_tophat', 'distance_transform_bf', 'distance_transform_cdt', 'distance_transform_edt' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.ndimage.morphology is deprecated and has no attribute " f"{name}. Try looking in scipy.ndimage instead.") warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, " "the `scipy.ndimage.morphology` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_morphology, name)
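# Illustrative sketch of the deprecation shim above: attribute access on the
# old `scipy.ndimage.morphology` namespace is forwarded to the private
# `_morphology` module and emits a DeprecationWarning, so the deprecated name
# resolves to the same object as the public one.
import warnings

import scipy.ndimage as ndi
from scipy.ndimage import morphology  # deprecated namespace

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    func = morphology.binary_erosion  # goes through the module __getattr__
assert func is ndi.binary_erosion
assert any(issubclass(w.category, DeprecationWarning) for w in caught)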
1,188
32.027778
77
py
scipy
scipy-main/scipy/ndimage/_ni_docstrings.py
"""Docstring components common to several ndimage functions.""" from scipy._lib import doccer __all__ = ['docfiller'] _input_doc = ( """input : array_like The input array.""") _axis_doc = ( """axis : int, optional The axis of `input` along which to calculate. Default is -1.""") _output_doc = ( """output : array or dtype, optional The array in which to place the output, or the dtype of the returned array. By default an array of the same dtype as input will be created.""") _size_foot_doc = ( """size : scalar or tuple, optional See footprint, below. Ignored if footprint is given. footprint : array, optional Either `size` or `footprint` must be defined. `size` gives the shape that is taken from the input array, at every element position, to define the input to the filter function. `footprint` is a boolean array that specifies (implicitly) a shape, but also which of the elements within this shape will get passed to the filter function. Thus ``size=(n,m)`` is equivalent to ``footprint=np.ones((n,m))``. We adjust `size` to the number of dimensions of the input array, so that, if the input array is shape (10,10,10), and `size` is 2, then the actual size used is (2,2,2). When `footprint` is given, `size` is ignored.""") _mode_reflect_doc = ( """mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The `mode` parameter determines how the input array is extended beyond its boundaries. Default is 'reflect'. Behavior for each valid value is as follows: 'reflect' (`d c b a | a b c d | d c b a`) The input is extended by reflecting about the edge of the last pixel. This mode is also sometimes referred to as half-sample symmetric. 'constant' (`k k k k | a b c d | k k k k`) The input is extended by filling all values beyond the edge with the same constant value, defined by the `cval` parameter. 'nearest' (`a a a a | a b c d | d d d d`) The input is extended by replicating the last pixel. 'mirror' (`d c b | a b c d | c b a`) The input is extended by reflecting about the center of the last pixel. This mode is also sometimes referred to as whole-sample symmetric. 'wrap' (`a b c d | a b c d | a b c d`) The input is extended by wrapping around to the opposite edge. For consistency with the interpolation functions, the following mode names can also be used: 'grid-mirror' This is a synonym for 'reflect'. 'grid-constant' This is a synonym for 'constant'. 'grid-wrap' This is a synonym for 'wrap'.""") _mode_interp_constant_doc = ( """mode : {'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', \ 'mirror', 'grid-wrap', 'wrap'}, optional The `mode` parameter determines how the input array is extended beyond its boundaries. Default is 'constant'. Behavior for each valid value is as follows (see additional plots and details on :ref:`boundary modes <ndimage-interpolation-modes>`): 'reflect' (`d c b a | a b c d | d c b a`) The input is extended by reflecting about the edge of the last pixel. This mode is also sometimes referred to as half-sample symmetric. 'grid-mirror' This is a synonym for 'reflect'. 'constant' (`k k k k | a b c d | k k k k`) The input is extended by filling all values beyond the edge with the same constant value, defined by the `cval` parameter. No interpolation is performed beyond the edges of the input. 'grid-constant' (`k k k k | a b c d | k k k k`) The input is extended by filling all values beyond the edge with the same constant value, defined by the `cval` parameter. Interpolation occurs for samples outside the input's extent as well. 
'nearest' (`a a a a | a b c d | d d d d`) The input is extended by replicating the last pixel. 'mirror' (`d c b | a b c d | c b a`) The input is extended by reflecting about the center of the last pixel. This mode is also sometimes referred to as whole-sample symmetric. 'grid-wrap' (`a b c d | a b c d | a b c d`) The input is extended by wrapping around to the opposite edge. 'wrap' (`d b c d | a b c d | b c a b`) The input is extended by wrapping around to the opposite edge, but in a way such that the last point and initial point exactly overlap. In this case it is not well defined which sample will be chosen at the point of overlap.""") _mode_interp_mirror_doc = ( _mode_interp_constant_doc.replace("Default is 'constant'", "Default is 'mirror'") ) assert _mode_interp_mirror_doc != _mode_interp_constant_doc, \ 'Default not replaced' _mode_multiple_doc = ( """mode : str or sequence, optional The `mode` parameter determines how the input array is extended when the filter overlaps a border. By passing a sequence of modes with length equal to the number of dimensions of the input array, different modes can be specified along each axis. Default value is 'reflect'. The valid values and their behavior is as follows: 'reflect' (`d c b a | a b c d | d c b a`) The input is extended by reflecting about the edge of the last pixel. This mode is also sometimes referred to as half-sample symmetric. 'constant' (`k k k k | a b c d | k k k k`) The input is extended by filling all values beyond the edge with the same constant value, defined by the `cval` parameter. 'nearest' (`a a a a | a b c d | d d d d`) The input is extended by replicating the last pixel. 'mirror' (`d c b | a b c d | c b a`) The input is extended by reflecting about the center of the last pixel. This mode is also sometimes referred to as whole-sample symmetric. 'wrap' (`a b c d | a b c d | a b c d`) The input is extended by wrapping around to the opposite edge. For consistency with the interpolation functions, the following mode names can also be used: 'grid-constant' This is a synonym for 'constant'. 'grid-mirror' This is a synonym for 'reflect'. 'grid-wrap' This is a synonym for 'wrap'.""") _cval_doc = ( """cval : scalar, optional Value to fill past edges of input if `mode` is 'constant'. Default is 0.0.""") _origin_doc = ( """origin : int, optional Controls the placement of the filter on the input array's pixels. A value of 0 (the default) centers the filter over the pixel, with positive values shifting the filter to the left, and negative ones to the right.""") _origin_multiple_doc = ( """origin : int or sequence, optional Controls the placement of the filter on the input array's pixels. A value of 0 (the default) centers the filter over the pixel, with positive values shifting the filter to the left, and negative ones to the right. By passing a sequence of origins with length equal to the number of dimensions of the input array, different shifts can be specified along each axis.""") _extra_arguments_doc = ( """extra_arguments : sequence, optional Sequence of extra positional arguments to pass to passed function.""") _extra_keywords_doc = ( """extra_keywords : dict, optional dict of extra keyword arguments to pass to passed function.""") _prefilter_doc = ( """prefilter : bool, optional Determines if the input array is prefiltered with `spline_filter` before interpolation. The default is True, which will create a temporary `float64` array of filtered values if `order > 1`. 
If setting this to False, the output will be slightly blurred if `order > 1`, unless the input is prefiltered, i.e. it is the result of calling `spline_filter` on the original input.""") docdict = { 'input': _input_doc, 'axis': _axis_doc, 'output': _output_doc, 'size_foot': _size_foot_doc, 'mode_interp_constant': _mode_interp_constant_doc, 'mode_interp_mirror': _mode_interp_mirror_doc, 'mode_reflect': _mode_reflect_doc, 'mode_multiple': _mode_multiple_doc, 'cval': _cval_doc, 'origin': _origin_doc, 'origin_multiple': _origin_multiple_doc, 'extra_arguments': _extra_arguments_doc, 'extra_keywords': _extra_keywords_doc, 'prefilter': _prefilter_doc } docfiller = doccer.filldoc(docdict)
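# Illustrative sketch of the templating mechanism: `docfiller` is built from
# the private helper `scipy._lib.doccer.filldoc` and replaces `%(...)s`
# placeholders in a decorated function's docstring with the entries of
# `docdict`.  A toy re-creation with a two-entry dictionary (the names below
# are made up for the example):
from scipy._lib import doccer

_toy_docdict = {
    'input': """input : array_like
    The input array.""",
    'axis': """axis : int, optional
    The axis of `input` along which to calculate. Default is -1.""",
}
_toy_docfiller = doccer.filldoc(_toy_docdict)


@_toy_docfiller
def _toy_filter(input, axis=-1):
    """Toy filter illustrating docstring templating.

    Parameters
    ----------
    %(input)s
    %(axis)s
    """


print(_toy_filter.__doc__)  # placeholders replaced by the docdict entries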
8,516
39.751196
79
py
scipy
scipy-main/scipy/ndimage/filters.py
# This file is not meant for public use and will be removed in SciPy v2.0.0. # Use the `scipy.ndimage` namespace for importing the functions # included below. import warnings from . import _filters __all__ = [ # noqa: F822 'correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', 'prewitt', 'sobel', 'generic_laplace', 'laplace', 'gaussian_laplace', 'generic_gradient_magnitude', 'gaussian_gradient_magnitude', 'correlate', 'convolve', 'uniform_filter1d', 'uniform_filter', 'minimum_filter1d', 'maximum_filter1d', 'minimum_filter', 'maximum_filter', 'rank_filter', 'median_filter', 'percentile_filter', 'generic_filter1d', 'generic_filter' ] def __dir__(): return __all__ def __getattr__(name): if name not in __all__: raise AttributeError( "scipy.ndimage.filters is deprecated and has no attribute " f"{name}. Try looking in scipy.ndimage instead.") warnings.warn(f"Please use `{name}` from the `scipy.ndimage` namespace, " "the `scipy.ndimage.filters` namespace is deprecated.", category=DeprecationWarning, stacklevel=2) return getattr(_filters, name)
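# Illustrative sketch of the error path in the shim above: an attribute that
# is not listed in `__all__` is rejected with an AttributeError pointing at
# the public `scipy.ndimage` namespace (no DeprecationWarning is emitted in
# that case, because the raise happens before the warning).
from scipy.ndimage import filters  # deprecated namespace

try:
    filters.not_a_real_filter  # hypothetical name, not an actual filter
except AttributeError as exc:
    print(exc)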
1,193
32.166667
77
py