diff --git a/.gitattributes b/.gitattributes index c7c75c9cc852cba56ae98179d839a7470dad09bf..359314592d740013b6b725dc2294afde22acda53 100644 --- a/.gitattributes +++ b/.gitattributes @@ -826,3 +826,9 @@ infer_4_47_1/lib/libasan.so filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/libitm.so.1 filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/libncursesw.so.6 filter=lfs diff=lfs merge=lfs -text infer_4_47_1/lib/libbz2.so.1.0.8 filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1533b5c60b695fce0abf08e2163dfba3bdd4fb17 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__init__.py @@ -0,0 +1,122 @@ +""" +============================================= +Integration and ODEs (:mod:`scipy.integrate`) +============================================= + +.. currentmodule:: scipy.integrate + +Integrating functions, given function object +============================================ + +.. 
autosummary:: + :toctree: generated/ + + quad -- General purpose integration + quad_vec -- General purpose integration of vector-valued functions + cubature -- General purpose multi-dimensional integration of array-valued functions + dblquad -- General purpose double integration + tplquad -- General purpose triple integration + nquad -- General purpose N-D integration + tanhsinh -- General purpose elementwise integration + fixed_quad -- Integrate func(x) using Gaussian quadrature of order n + newton_cotes -- Weights and error coefficient for Newton-Cotes integration + lebedev_rule + qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature + IntegrationWarning -- Warning on issues during integration + + +Integrating functions, given fixed samples +========================================== + +.. autosummary:: + :toctree: generated/ + + trapezoid -- Use trapezoidal rule to compute integral. + cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral. + simpson -- Use Simpson's rule to compute integral from samples. + cumulative_simpson -- Use Simpson's rule to cumulatively compute integral from samples. + romb -- Use Romberg Integration to compute integral from + -- (2**k + 1) evenly-spaced samples. + +.. seealso:: + + :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian + quadrature roots and weights for other weighting factors and regions. + +Summation +========= + +.. autosummary:: + :toctree: generated/ + + nsum + +Solving initial value problems for ODE systems +============================================== + +The solvers are implemented as individual classes, which can be used directly +(low-level usage) or through a convenience function. + +.. autosummary:: + :toctree: generated/ + + solve_ivp -- Convenient function for ODE integration. + RK23 -- Explicit Runge-Kutta solver of order 3(2). + RK45 -- Explicit Runge-Kutta solver of order 5(4). + DOP853 -- Explicit Runge-Kutta solver of order 8. 
+ Radau -- Implicit Runge-Kutta solver of order 5. + BDF -- Implicit multi-step variable order (1 to 5) solver. + LSODA -- LSODA solver from ODEPACK Fortran package. + OdeSolver -- Base class for ODE solvers. + DenseOutput -- Local interpolant for computing a dense output. + OdeSolution -- Class which represents a continuous ODE solution. + + +Old API +------- + +These are the routines developed earlier for SciPy. They wrap older solvers +implemented in Fortran (mostly ODEPACK). While the interface to them is not +particularly convenient and certain features are missing compared to the new +API, the solvers themselves are of good quality and work fast as compiled +Fortran code. In some cases, it might be worth using this old API. + +.. autosummary:: + :toctree: generated/ + + odeint -- General integration of ordinary differential equations. + ode -- Integrate ODE using VODE and ZVODE routines. + complex_ode -- Convert a complex-valued ODE to real-valued and integrate. + ODEintWarning -- Warning raised during the execution of `odeint`. + + +Solving boundary value problems for ODE systems +=============================================== + +.. autosummary:: + :toctree: generated/ + + solve_bvp -- Solve a boundary value problem for a system of ODEs. +""" # noqa: E501 + + +from ._quadrature import * +from ._odepack_py import * +from ._quadpack_py import * +from ._ode import * +from ._bvp import solve_bvp +from ._ivp import (solve_ivp, OdeSolution, DenseOutput, + OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA) +from ._quad_vec import quad_vec +from ._tanhsinh import nsum, tanhsinh +from ._cubature import cubature +from ._lebedev import lebedev_rule + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import dop, lsoda, vode, odepack, quadpack + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..411b76dbdf44b4abc9b9668071ba740e000bb78d Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2214b67265a5100f64f8a325194677a5ee34b64 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/_ode.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d025371490e1b1f0595fec13161133ee48bf66c Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f4d52252c77d6491c849604bf03af0ab961dcb6 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc differ diff --git 
a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_bvp.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_bvp.py new file mode 100644 index 0000000000000000000000000000000000000000..74406c89a689edc3de21fcb7274c90d41b8d2dcc --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_bvp.py @@ -0,0 +1,1154 @@ +"""Boundary value problem solver.""" +from warnings import warn + +import numpy as np +from numpy.linalg import pinv + +from scipy.sparse import coo_matrix, csc_matrix +from scipy.sparse.linalg import splu +from scipy.optimize import OptimizeResult + + +EPS = np.finfo(float).eps + + +def estimate_fun_jac(fun, x, y, p, f0=None): + """Estimate derivatives of an ODE system rhs with forward differences. + + Returns + ------- + df_dy : ndarray, shape (n, n, m) + Derivatives with respect to y. An element (i, j, q) corresponds to + d f_i(x_q, y_q) / d (y_q)_j. + df_dp : ndarray with shape (n, k, m) or None + Derivatives with respect to p. An element (i, j, q) corresponds to + d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned. + """ + n, m = y.shape + if f0 is None: + f0 = fun(x, y, p) + + dtype = y.dtype + + df_dy = np.empty((n, n, m), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(y)) + for i in range(n): + y_new = y.copy() + y_new[i] += h[i] + hi = y_new[i] - y[i] + f_new = fun(x, y_new, p) + df_dy[:, i, :] = (f_new - f0) / hi + + k = p.shape[0] + if k == 0: + df_dp = None + else: + df_dp = np.empty((n, k, m), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(p)) + for i in range(k): + p_new = p.copy() + p_new[i] += h[i] + hi = p_new[i] - p[i] + f_new = fun(x, y, p_new) + df_dp[:, i, :] = (f_new - f0) / hi + + return df_dy, df_dp + + +def estimate_bc_jac(bc, ya, yb, p, bc0=None): + """Estimate derivatives of boundary conditions with forward differences. + + Returns + ------- + dbc_dya : ndarray, shape (n + k, n) + Derivatives with respect to ya. An element (i, j) corresponds to + d bc_i / d ya_j. 
+ dbc_dyb : ndarray, shape (n + k, n) + Derivatives with respect to yb. An element (i, j) corresponds to + d bc_i / d ya_j. + dbc_dp : ndarray with shape (n + k, k) or None + Derivatives with respect to p. An element (i, j) corresponds to + d bc_i / d p_j. If `p` is empty, None is returned. + """ + n = ya.shape[0] + k = p.shape[0] + + if bc0 is None: + bc0 = bc(ya, yb, p) + + dtype = ya.dtype + + dbc_dya = np.empty((n, n + k), dtype=dtype) + h = EPS**0.5 * (1 + np.abs(ya)) + for i in range(n): + ya_new = ya.copy() + ya_new[i] += h[i] + hi = ya_new[i] - ya[i] + bc_new = bc(ya_new, yb, p) + dbc_dya[i] = (bc_new - bc0) / hi + dbc_dya = dbc_dya.T + + h = EPS**0.5 * (1 + np.abs(yb)) + dbc_dyb = np.empty((n, n + k), dtype=dtype) + for i in range(n): + yb_new = yb.copy() + yb_new[i] += h[i] + hi = yb_new[i] - yb[i] + bc_new = bc(ya, yb_new, p) + dbc_dyb[i] = (bc_new - bc0) / hi + dbc_dyb = dbc_dyb.T + + if k == 0: + dbc_dp = None + else: + h = EPS**0.5 * (1 + np.abs(p)) + dbc_dp = np.empty((k, n + k), dtype=dtype) + for i in range(k): + p_new = p.copy() + p_new[i] += h[i] + hi = p_new[i] - p[i] + bc_new = bc(ya, yb, p_new) + dbc_dp[i] = (bc_new - bc0) / hi + dbc_dp = dbc_dp.T + + return dbc_dya, dbc_dyb, dbc_dp + + +def compute_jac_indices(n, m, k): + """Compute indices for the collocation system Jacobian construction. + + See `construct_global_jac` for the explanation. 
+ """ + i_col = np.repeat(np.arange((m - 1) * n), n) + j_col = (np.tile(np.arange(n), n * (m - 1)) + + np.repeat(np.arange(m - 1) * n, n**2)) + + i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n) + j_bc = np.tile(np.arange(n), n + k) + + i_p_col = np.repeat(np.arange((m - 1) * n), k) + j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n) + + i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k) + j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k) + + i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc)) + j = np.hstack((j_col, j_col + n, + j_bc, j_bc + (m - 1) * n, + j_p_col, j_p_bc)) + + return i, j + + +def stacked_matmul(a, b): + """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]). + + Empirical optimization. Use outer Python loop and BLAS for large + matrices, otherwise use a single einsum call. + """ + if a.shape[1] > 50: + out = np.empty((a.shape[0], a.shape[1], b.shape[2])) + for i in range(a.shape[0]): + out[i] = np.dot(a[i], b[i]) + return out + else: + return np.einsum('...ij,...jk->...ik', a, b) + + +def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp, + df_dp_middle, dbc_dya, dbc_dyb, dbc_dp): + """Construct the Jacobian of the collocation system. + + There are n * m + k functions: m - 1 collocations residuals, each + containing n components, followed by n + k boundary condition residuals. + + There are n * m + k variables: m vectors of y, each containing n + components, followed by k values of vector p. + + For example, let m = 4, n = 2 and k = 1, then the Jacobian will have + the following sparsity structure: + + 1 1 2 2 0 0 0 0 5 + 1 1 2 2 0 0 0 0 5 + 0 0 1 1 2 2 0 0 5 + 0 0 1 1 2 2 0 0 5 + 0 0 0 0 1 1 2 2 5 + 0 0 0 0 1 1 2 2 5 + + 3 3 0 0 0 0 4 4 6 + 3 3 0 0 0 0 4 4 6 + 3 3 0 0 0 0 4 4 6 + + Zeros denote identically zero values, other values denote different kinds + of blocks in the matrix (see below). 
The blank row indicates the separation + of collocation residuals from boundary conditions. And the blank column + indicates the separation of y values from p values. + + Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives + of collocation residuals with respect to y. + + Parameters + ---------- + n : int + Number of equations in the ODE system. + m : int + Number of nodes in the mesh. + k : int + Number of the unknown parameters. + i_jac, j_jac : ndarray + Row and column indices returned by `compute_jac_indices`. They + represent different blocks in the Jacobian matrix in the following + order (see the scheme above): + + * 1: m - 1 diagonal n x n blocks for the collocation residuals. + * 2: m - 1 off-diagonal n x n blocks for the collocation residuals. + * 3 : (n + k) x n block for the dependency of the boundary + conditions on ya. + * 4: (n + k) x n block for the dependency of the boundary + conditions on yb. + * 5: (m - 1) * n x k block for the dependency of the collocation + residuals on p. + * 6: (n + k) x k block for the dependency of the boundary + conditions on p. + + df_dy : ndarray, shape (n, n, m) + Jacobian of f with respect to y computed at the mesh nodes. + df_dy_middle : ndarray, shape (n, n, m - 1) + Jacobian of f with respect to y computed at the middle between the + mesh nodes. + df_dp : ndarray with shape (n, k, m) or None + Jacobian of f with respect to p computed at the mesh nodes. + df_dp_middle : ndarray with shape (n, k, m - 1) or None + Jacobian of f with respect to p computed at the middle between the + mesh nodes. + dbc_dya, dbc_dyb : ndarray, shape (n, n) + Jacobian of bc with respect to ya and yb. + dbc_dp : ndarray with shape (n, k) or None + Jacobian of bc with respect to p. + + Returns + ------- + J : csc_matrix, shape (n * m + k, n * m + k) + Jacobian of the collocation system in a sparse form. + + References + ---------- + .. [1] J. Kierzenka, L. F. 
Shampine, "A BVP Solver Based on Residual + Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27, + Number 3, pp. 299-316, 2001. + """ + df_dy = np.transpose(df_dy, (2, 0, 1)) + df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1)) + + h = h[:, np.newaxis, np.newaxis] + + dtype = df_dy.dtype + + # Computing diagonal n x n blocks. + dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype) + dPhi_dy_0[:] = -np.identity(n) + dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle) + T = stacked_matmul(df_dy_middle, df_dy[:-1]) + dPhi_dy_0 -= h**2 / 12 * T + + # Computing off-diagonal n x n blocks. + dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype) + dPhi_dy_1[:] = np.identity(n) + dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle) + T = stacked_matmul(df_dy_middle, df_dy[1:]) + dPhi_dy_1 += h**2 / 12 * T + + values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(), + dbc_dyb.ravel())) + + if k > 0: + df_dp = np.transpose(df_dp, (2, 0, 1)) + df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1)) + T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:]) + df_dp_middle += 0.125 * h * T + dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle) + values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel())) + + J = coo_matrix((values, (i_jac, j_jac))) + return csc_matrix(J) + + +def collocation_fun(fun, y, p, x, h): + """Evaluate collocation residuals. + + This function lies in the core of the method. The solution is sought + as a cubic C1 continuous spline with derivatives matching the ODE rhs + at given nodes `x`. Collocation conditions are formed from the equality + of the spline derivatives and rhs of the ODE system in the middle points + between nodes. + + Such method is classified to Lobbato IIIA family in ODE literature. + Refer to [1]_ for the formula and some discussion. + + Returns + ------- + col_res : ndarray, shape (n, m - 1) + Collocation residuals at the middle points of the mesh intervals. 
+ y_middle : ndarray, shape (n, m - 1) + Values of the cubic spline evaluated at the middle points of the mesh + intervals. + f : ndarray, shape (n, m) + RHS of the ODE system evaluated at the mesh nodes. + f_middle : ndarray, shape (n, m - 1) + RHS of the ODE system evaluated at the middle points of the mesh + intervals (and using `y_middle`). + + References + ---------- + .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual + Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27, + Number 3, pp. 299-316, 2001. + """ + f = fun(x, y, p) + y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) - + 0.125 * h * (f[:, 1:] - f[:, :-1])) + f_middle = fun(x[:-1] + 0.5 * h, y_middle, p) + col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] + + 4 * f_middle) + + return col_res, y_middle, f, f_middle + + +def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h): + """Create the function and the Jacobian for the collocation system.""" + x_middle = x[:-1] + 0.5 * h + i_jac, j_jac = compute_jac_indices(n, m, k) + + def col_fun(y, p): + return collocation_fun(fun, y, p, x, h) + + def sys_jac(y, p, y_middle, f, f_middle, bc0): + if fun_jac is None: + df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f) + df_dy_middle, df_dp_middle = estimate_fun_jac( + fun, x_middle, y_middle, p, f_middle) + else: + df_dy, df_dp = fun_jac(x, y, p) + df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p) + + if bc_jac is None: + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1], + p, bc0) + else: + dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p) + + return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, + df_dy_middle, df_dp, df_dp_middle, dbc_dya, + dbc_dyb, dbc_dp) + + return col_fun, sys_jac + + +def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol): + """Solve the nonlinear collocation system by a Newton method. + + This is a simple Newton method with a backtracking line search. 
As + advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2 + is used, where J is the Jacobian matrix at the current iteration and r is + the vector or collocation residuals (values of the system lhs). + + The method alters between full Newton iterations and the fixed-Jacobian + iterations based + + There are other tricks proposed in [1]_, but they are not used as they + don't seem to improve anything significantly, and even break the + convergence on some test problems I tried. + + All important parameters of the algorithm are defined inside the function. + + Parameters + ---------- + n : int + Number of equations in the ODE system. + m : int + Number of nodes in the mesh. + h : ndarray, shape (m-1,) + Mesh intervals. + col_fun : callable + Function computing collocation residuals. + bc : callable + Function computing boundary condition residuals. + jac : callable + Function computing the Jacobian of the whole system (including + collocation and boundary condition residuals). It is supposed to + return csc_matrix. + y : ndarray, shape (n, m) + Initial guess for the function values at the mesh nodes. + p : ndarray, shape (k,) + Initial guess for the unknown parameters. + B : ndarray with shape (n, n) or None + Matrix to force the S y(a) = 0 condition for a problems with the + singular term. If None, the singular term is assumed to be absent. + bvp_tol : float + Tolerance to which we want to solve a BVP. + bc_tol : float + Tolerance to which we want to satisfy the boundary conditions. + + Returns + ------- + y : ndarray, shape (n, m) + Final iterate for the function values at the mesh nodes. + p : ndarray, shape (k,) + Final iterate for the unknown parameters. + singular : bool + True, if the LU decomposition failed because Jacobian turned out + to be singular. + + References + ---------- + .. [1] U. Ascher, R. Mattheij and R. 
Russell "Numerical Solution of + Boundary Value Problems for Ordinary Differential Equations" + """ + # We know that the solution residuals at the middle points of the mesh + # are connected with collocation residuals r_middle = 1.5 * col_res / h. + # As our BVP solver tries to decrease relative residuals below a certain + # tolerance, it seems reasonable to terminated Newton iterations by + # comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold, + # which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite + # the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r + # should be computed as follows: + tol_r = 2/3 * h * 5e-2 * bvp_tol + + # Maximum allowed number of Jacobian evaluation and factorization, in + # other words, the maximum number of full Newton iterations. A small value + # is recommended in the literature. + max_njev = 4 + + # Maximum number of iterations, considering that some of them can be + # performed with the fixed Jacobian. In theory, such iterations are cheap, + # but it's not that simple in Python. + max_iter = 8 + + # Minimum relative improvement of the criterion function to accept the + # step (Armijo constant). + sigma = 0.2 + + # Step size decrease factor for backtracking. + tau = 0.5 + + # Maximum number of backtracking steps, the minimum step is then + # tau ** n_trial. 
+ n_trial = 4 + + col_res, y_middle, f, f_middle = col_fun(y, p) + bc_res = bc(y[:, 0], y[:, -1], p) + res = np.hstack((col_res.ravel(order='F'), bc_res)) + + njev = 0 + singular = False + recompute_jac = True + for iteration in range(max_iter): + if recompute_jac: + J = jac(y, p, y_middle, f, f_middle, bc_res) + njev += 1 + try: + LU = splu(J) + except RuntimeError: + singular = True + break + + step = LU.solve(res) + cost = np.dot(step, step) + + y_step = step[:m * n].reshape((n, m), order='F') + p_step = step[m * n:] + + alpha = 1 + for trial in range(n_trial + 1): + y_new = y - alpha * y_step + if B is not None: + y_new[:, 0] = np.dot(B, y_new[:, 0]) + p_new = p - alpha * p_step + + col_res, y_middle, f, f_middle = col_fun(y_new, p_new) + bc_res = bc(y_new[:, 0], y_new[:, -1], p_new) + res = np.hstack((col_res.ravel(order='F'), bc_res)) + + step_new = LU.solve(res) + cost_new = np.dot(step_new, step_new) + if cost_new < (1 - 2 * alpha * sigma) * cost: + break + + if trial < n_trial: + alpha *= tau + + y = y_new + p = p_new + + if njev == max_njev: + break + + if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and + np.all(np.abs(bc_res) < bc_tol)): + break + + # If the full step was taken, then we are going to continue with + # the same Jacobian. This is the approach of BVP_SOLVER. 
+ if alpha == 1: + step = step_new + cost = cost_new + recompute_jac = False + else: + recompute_jac = True + + return y, p, singular + + +def print_iteration_header(): + print(f"{'Iteration':^15}{'Max residual':^15}{'Max BC residual':^15}" + f"{'Total nodes':^15}{'Nodes added':^15}") + + +def print_iteration_progress(iteration, residual, bc_residual, total_nodes, + nodes_added): + print(f"{iteration:^15}{residual:^15.2e}{bc_residual:^15.2e}" + f"{total_nodes:^15}{nodes_added:^15}") + + +class BVPResult(OptimizeResult): + pass + + +TERMINATION_MESSAGES = { + 0: "The algorithm converged to the desired accuracy.", + 1: "The maximum number of mesh nodes is exceeded.", + 2: "A singular Jacobian encountered when solving the collocation system.", + 3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10." +} + + +def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle): + """Estimate rms values of collocation residuals using Lobatto quadrature. + + The residuals are defined as the difference between the derivatives of + our solution and rhs of the ODE system. We use relative residuals, i.e., + normalized by 1 + np.abs(f). RMS values are computed as sqrt from the + normalized integrals of the squared relative residuals over each interval. + Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the + fact that residuals at the mesh nodes are identically zero. + + In [2] they don't normalize integrals by interval lengths, which gives + a higher rate of convergence of the residuals by the factor of h**0.5. + I chose to do such normalization for an ease of interpretation of return + values as RMS estimates. + + Returns + ------- + rms_res : ndarray, shape (m - 1,) + Estimated rms values of the relative residuals over each interval. + + References + ---------- + .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html + .. [2] J. Kierzenka, L. F. 
Shampine, "A BVP Solver Based on Residual + Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27, + Number 3, pp. 299-316, 2001. + """ + x_middle = x[:-1] + 0.5 * h + s = 0.5 * h * (3/7)**0.5 + x1 = x_middle + s + x2 = x_middle - s + y1 = sol(x1) + y2 = sol(x2) + y1_prime = sol(x1, 1) + y2_prime = sol(x2, 1) + f1 = fun(x1, y1, p) + f2 = fun(x2, y2, p) + r1 = y1_prime - f1 + r2 = y2_prime - f2 + + r_middle /= 1 + np.abs(f_middle) + r1 /= 1 + np.abs(f1) + r2 /= 1 + np.abs(f2) + + r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0) + r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0) + r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0) + + return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5 + + +def create_spline(y, yp, x, h): + """Create a cubic spline given values and derivatives. + + Formulas for the coefficients are taken from interpolate.CubicSpline. + + Returns + ------- + sol : PPoly + Constructed spline as a PPoly instance. + """ + from scipy.interpolate import PPoly + + n, m = y.shape + c = np.empty((4, n, m - 1), dtype=y.dtype) + slope = (y[:, 1:] - y[:, :-1]) / h + t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h + c[0] = t / h + c[1] = (slope - yp[:, :-1]) / h - t + c[2] = yp[:, :-1] + c[3] = y[:, :-1] + c = np.moveaxis(c, 1, 0) + + return PPoly(c, x, extrapolate=True, axis=1) + + +def modify_mesh(x, insert_1, insert_2): + """Insert nodes into a mesh. + + Nodes removal logic is not established, its impact on the solver is + presumably negligible. So, only insertion is done in this function. + + Parameters + ---------- + x : ndarray, shape (m,) + Mesh nodes. + insert_1 : ndarray + Intervals to each insert 1 new node in the middle. + insert_2 : ndarray + Intervals to each insert 2 new nodes, such that divide an interval + into 3 equal parts. + + Returns + ------- + x_new : ndarray + New mesh nodes. + + Notes + ----- + `insert_1` and `insert_2` should not have common values. 
+ """ + # Because np.insert implementation apparently varies with a version of + # NumPy, we use a simple and reliable approach with sorting. + return np.sort(np.hstack(( + x, + 0.5 * (x[insert_1] + x[insert_1 + 1]), + (2 * x[insert_2] + x[insert_2 + 1]) / 3, + (x[insert_2] + 2 * x[insert_2 + 1]) / 3 + ))) + + +def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype): + """Wrap functions for unified usage in the solver.""" + if fun_jac is None: + fun_jac_wrapped = None + + if bc_jac is None: + bc_jac_wrapped = None + + if k == 0: + def fun_p(x, y, _): + return np.asarray(fun(x, y), dtype) + + def bc_wrapped(ya, yb, _): + return np.asarray(bc(ya, yb), dtype) + + if fun_jac is not None: + def fun_jac_p(x, y, _): + return np.asarray(fun_jac(x, y), dtype), None + + if bc_jac is not None: + def bc_jac_wrapped(ya, yb, _): + dbc_dya, dbc_dyb = bc_jac(ya, yb) + return (np.asarray(dbc_dya, dtype), + np.asarray(dbc_dyb, dtype), None) + else: + def fun_p(x, y, p): + return np.asarray(fun(x, y, p), dtype) + + def bc_wrapped(x, y, p): + return np.asarray(bc(x, y, p), dtype) + + if fun_jac is not None: + def fun_jac_p(x, y, p): + df_dy, df_dp = fun_jac(x, y, p) + return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype) + + if bc_jac is not None: + def bc_jac_wrapped(ya, yb, p): + dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p) + return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype), + np.asarray(dbc_dp, dtype)) + + if S is None: + fun_wrapped = fun_p + else: + def fun_wrapped(x, y, p): + f = fun_p(x, y, p) + if x[0] == a: + f[:, 0] = np.dot(D, f[:, 0]) + f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a) + else: + f += np.dot(S, y) / (x - a) + return f + + if fun_jac is not None: + if S is None: + fun_jac_wrapped = fun_jac_p + else: + Sr = S[:, :, np.newaxis] + + def fun_jac_wrapped(x, y, p): + df_dy, df_dp = fun_jac_p(x, y, p) + if x[0] == a: + df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0]) + df_dy[:, :, 1:] += Sr / (x[1:] - a) + else: + df_dy += Sr / (x - a) + + return 
df_dy, df_dp + + return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped + + +def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None, + tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None): + """Solve a boundary value problem for a system of ODEs. + + This function numerically solves a first order system of ODEs subject to + two-point boundary conditions:: + + dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b + bc(y(a), y(b), p) = 0 + + Here x is a 1-D independent variable, y(x) is an n-D + vector-valued function and p is a k-D vector of unknown + parameters which is to be found along with y(x). For the problem to be + determined, there must be n + k boundary conditions, i.e., bc must be an + (n + k)-D function. + + The last singular term on the right-hand side of the system is optional. + It is defined by an n-by-n matrix S, such that the solution must satisfy + S y(a) = 0. This condition will be forced during iterations, so it must not + contradict boundary conditions. See [2]_ for the explanation how this term + is handled when solving BVPs numerically. + + Problems in a complex domain can be solved as well. In this case, y and p + are considered to be complex, and f and bc are assumed to be complex-valued + functions, but x stays real. Note that f and bc must be complex + differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you + should rewrite your problem for real and imaginary parts separately. To + solve a problem in a complex domain, pass an initial guess for y with a + complex data type (see below). + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(x, y)``, + or ``fun(x, y, p)`` if parameters are present. All arguments are + ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that + ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The + return value must be an array with shape (n, m) and with the same + layout as ``y``. 
+ bc : callable + Function evaluating residuals of the boundary conditions. The calling + signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are + present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,), + and ``p`` with shape (k,). The return value must be an array with + shape (n + k,). + x : array_like, shape (m,) + Initial mesh. Must be a strictly increasing sequence of real numbers + with ``x[0]=a`` and ``x[-1]=b``. + y : array_like, shape (n, m) + Initial guess for the function values at the mesh nodes, ith column + corresponds to ``x[i]``. For problems in a complex domain pass `y` + with a complex data type (even if the initial guess is purely real). + p : array_like with shape (k,) or None, optional + Initial guess for the unknown parameters. If None (default), it is + assumed that the problem doesn't depend on any parameters. + S : array_like with shape (n, n) or None + Matrix defining the singular term. If None (default), the problem is + solved without the singular term. + fun_jac : callable or None, optional + Function computing derivatives of f with respect to y and p. The + calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if + parameters are present. The return must contain 1 or 2 elements in the + following order: + + * df_dy : array_like with shape (n, n, m), where an element + (i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j. + * df_dp : array_like with shape (n, k, m), where an element + (i, j, q) equals to d f_i(x_q, y_q, p) / d p_j. + + Here q numbers nodes at which x and y are defined, whereas i and j + number vector components. If the problem is solved without unknown + parameters, df_dp should not be returned. + + If `fun_jac` is None (default), the derivatives will be estimated + by the forward finite differences. + bc_jac : callable or None, optional + Function computing derivatives of bc with respect to ya, yb, and p. 
+ The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)`` + if parameters are present. The return must contain 2 or 3 elements in + the following order: + + * dbc_dya : array_like with shape (n, n), where an element (i, j) + equals to d bc_i(ya, yb, p) / d ya_j. + * dbc_dyb : array_like with shape (n, n), where an element (i, j) + equals to d bc_i(ya, yb, p) / d yb_j. + * dbc_dp : array_like with shape (n, k), where an element (i, j) + equals to d bc_i(ya, yb, p) / d p_j. + + If the problem is solved without unknown parameters, dbc_dp should not + be returned. + + If `bc_jac` is None (default), the derivatives will be estimated by + the forward finite differences. + tol : float, optional + Desired tolerance of the solution. If we define ``r = y' - f(x, y)``, + where y is the found solution, then the solver tries to achieve on each + mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is + estimated in a root mean squared sense (using a numerical quadrature + formula). Default is 1e-3. + max_nodes : int, optional + Maximum allowed number of the mesh nodes. If exceeded, the algorithm + terminates. Default is 1000. + verbose : {0, 1, 2}, optional + Level of algorithm's verbosity: + + * 0 (default) : work silently. + * 1 : display a termination report. + * 2 : display progress during iterations. + bc_tol : float, optional + Desired absolute tolerance for the boundary condition residuals: `bc` + value should satisfy ``abs(bc) < bc_tol`` component-wise. + Equals to `tol` by default. Up to 10 iterations are allowed to achieve this + tolerance. + + Returns + ------- + Bunch object with the following fields defined: + sol : PPoly + Found solution for y as `scipy.interpolate.PPoly` instance, a C1 + continuous cubic spline. + p : ndarray or None, shape (k,) + Found parameters. None, if the parameters were not present in the + problem. + x : ndarray, shape (m,) + Nodes of the final mesh. + y : ndarray, shape (n, m) + Solution values at the mesh nodes. 
    yp : ndarray, shape (n, m)
        Solution derivatives at the mesh nodes.
    rms_residuals : ndarray, shape (m - 1,)
        RMS values of the relative residuals over each mesh interval (see the
        description of `tol` parameter).
    niter : int
        Number of completed iterations.
    status : int
        Reason for algorithm termination:

        * 0: The algorithm converged to the desired accuracy.
        * 1: The maximum number of mesh nodes is exceeded.
        * 2: A singular Jacobian encountered when solving the collocation
          system.

    message : string
        Verbal description of the termination reason.
    success : bool
        True if the algorithm converged to the desired accuracy (``status=0``).

    Notes
    -----
    This function implements a 4th order collocation algorithm with the
    control of residuals similar to [1]_. A collocation system is solved
    by a damped Newton method with an affine-invariant criterion function as
    described in [3]_.

    Note that in [1]_ integral residuals are defined without normalization
    by interval lengths. So, their definition is different by a multiplier of
    h**0.5 (h is an interval length) from the definition used here.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the Matlab PSE", ACM Trans. Math. Softw., Vol. 27,
           Number 3, pp. 299-316, 2001.
    .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
           Solver".
    .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
           Boundary Value Problems for Ordinary Differential Equations".
    .. [4] `Cauchy-Riemann equations
            <https://en.wikipedia.org/wiki/Cauchy%E2%80%93Riemann_equations>`_ on
            Wikipedia.

    Examples
    --------
    In the first example, we solve Bratu's problem::

        y'' + k * exp(y) = 0
        y(0) = y(1) = 0

    for k = 1.

    We rewrite the equation as a first-order system and implement its
    right-hand side evaluation::

        y1' = y2
        y2' = -exp(y1)

    >>> import numpy as np
    >>> def fun(x, y):
    ...
return np.vstack((y[1], -np.exp(y[0]))) + + Implement evaluation of the boundary condition residuals: + + >>> def bc(ya, yb): + ... return np.array([ya[0], yb[0]]) + + Define the initial mesh with 5 nodes: + + >>> x = np.linspace(0, 1, 5) + + This problem is known to have two solutions. To obtain both of them, we + use two different initial guesses for y. We denote them by subscripts + a and b. + + >>> y_a = np.zeros((2, x.size)) + >>> y_b = np.zeros((2, x.size)) + >>> y_b[0] = 3 + + Now we are ready to run the solver. + + >>> from scipy.integrate import solve_bvp + >>> res_a = solve_bvp(fun, bc, x, y_a) + >>> res_b = solve_bvp(fun, bc, x, y_b) + + Let's plot the two found solutions. We take an advantage of having the + solution in a spline form to produce a smooth plot. + + >>> x_plot = np.linspace(0, 1, 100) + >>> y_plot_a = res_a.sol(x_plot)[0] + >>> y_plot_b = res_b.sol(x_plot)[0] + >>> import matplotlib.pyplot as plt + >>> plt.plot(x_plot, y_plot_a, label='y_a') + >>> plt.plot(x_plot, y_plot_b, label='y_b') + >>> plt.legend() + >>> plt.xlabel("x") + >>> plt.ylabel("y") + >>> plt.show() + + We see that the two solutions have similar shape, but differ in scale + significantly. + + In the second example, we solve a simple Sturm-Liouville problem:: + + y'' + k**2 * y = 0 + y(0) = y(1) = 0 + + It is known that a non-trivial solution y = A * sin(k * x) is possible for + k = pi * n, where n is an integer. To establish the normalization constant + A = 1 we add a boundary condition:: + + y'(0) = k + + Again, we rewrite our equation as a first-order system and implement its + right-hand side evaluation:: + + y1' = y2 + y2' = -k**2 * y1 + + >>> def fun(x, y, p): + ... k = p[0] + ... return np.vstack((y[1], -k**2 * y[0])) + + Note that parameters p are passed as a vector (with one element in our + case). + + Implement the boundary conditions: + + >>> def bc(ya, yb, p): + ... k = p[0] + ... 
return np.array([ya[0], yb[0], ya[1] - k]) + + Set up the initial mesh and guess for y. We aim to find the solution for + k = 2 * pi, to achieve that we set values of y to approximately follow + sin(2 * pi * x): + + >>> x = np.linspace(0, 1, 5) + >>> y = np.zeros((2, x.size)) + >>> y[0, 1] = 1 + >>> y[0, 3] = -1 + + Run the solver with 6 as an initial guess for k. + + >>> sol = solve_bvp(fun, bc, x, y, p=[6]) + + We see that the found k is approximately correct: + + >>> sol.p[0] + 6.28329460046 + + And, finally, plot the solution to see the anticipated sinusoid: + + >>> x_plot = np.linspace(0, 1, 100) + >>> y_plot = sol.sol(x_plot)[0] + >>> plt.plot(x_plot, y_plot) + >>> plt.xlabel("x") + >>> plt.ylabel("y") + >>> plt.show() + """ + x = np.asarray(x, dtype=float) + if x.ndim != 1: + raise ValueError("`x` must be 1 dimensional.") + h = np.diff(x) + if np.any(h <= 0): + raise ValueError("`x` must be strictly increasing.") + a = x[0] + + y = np.asarray(y) + if np.issubdtype(y.dtype, np.complexfloating): + dtype = complex + else: + dtype = float + y = y.astype(dtype, copy=False) + + if y.ndim != 2: + raise ValueError("`y` must be 2 dimensional.") + if y.shape[1] != x.shape[0]: + raise ValueError(f"`y` is expected to have {x.shape[0]} columns, but actually " + f"has {y.shape[1]}.") + + if p is None: + p = np.array([]) + else: + p = np.asarray(p, dtype=dtype) + if p.ndim != 1: + raise ValueError("`p` must be 1 dimensional.") + + if tol < 100 * EPS: + warn(f"`tol` is too low, setting to {100 * EPS:.2e}", stacklevel=2) + tol = 100 * EPS + + if verbose not in [0, 1, 2]: + raise ValueError("`verbose` must be in [0, 1, 2].") + + n = y.shape[0] + k = p.shape[0] + + if S is not None: + S = np.asarray(S, dtype=dtype) + if S.shape != (n, n): + raise ValueError(f"`S` is expected to have shape {(n, n)}, " + f"but actually has {S.shape}") + + # Compute I - S^+ S to impose necessary boundary conditions. 
+ B = np.identity(n) - np.dot(pinv(S), S) + + y[:, 0] = np.dot(B, y[:, 0]) + + # Compute (I - S)^+ to correct derivatives at x=a. + D = pinv(np.identity(n) - S) + else: + B = None + D = None + + if bc_tol is None: + bc_tol = tol + + # Maximum number of iterations + max_iteration = 10 + + fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions( + fun, bc, fun_jac, bc_jac, k, a, S, D, dtype) + + f = fun_wrapped(x, y, p) + if f.shape != y.shape: + raise ValueError(f"`fun` return is expected to have shape {y.shape}, " + f"but actually has {f.shape}.") + + bc_res = bc_wrapped(y[:, 0], y[:, -1], p) + if bc_res.shape != (n + k,): + raise ValueError(f"`bc` return is expected to have shape {(n + k,)}, " + f"but actually has {bc_res.shape}.") + + status = 0 + iteration = 0 + if verbose == 2: + print_iteration_header() + + while True: + m = x.shape[0] + + col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped, + fun_jac_wrapped, bc_jac_wrapped, x, h) + y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys, + y, p, B, tol, bc_tol) + iteration += 1 + + col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y, + p, x, h) + bc_res = bc_wrapped(y[:, 0], y[:, -1], p) + max_bc_res = np.max(abs(bc_res)) + + # This relation is not trivial, but can be verified. 
+ r_middle = 1.5 * col_res / h + sol = create_spline(y, f, x, h) + rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p, + r_middle, f_middle) + max_rms_res = np.max(rms_res) + + if singular: + status = 2 + break + + insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol)) + insert_2, = np.nonzero(rms_res >= 100 * tol) + nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0] + + if m + nodes_added > max_nodes: + status = 1 + if verbose == 2: + nodes_added = f"({nodes_added})" + print_iteration_progress(iteration, max_rms_res, max_bc_res, + m, nodes_added) + break + + if verbose == 2: + print_iteration_progress(iteration, max_rms_res, max_bc_res, m, + nodes_added) + + if nodes_added > 0: + x = modify_mesh(x, insert_1, insert_2) + h = np.diff(x) + y = sol(x) + elif max_bc_res <= bc_tol: + status = 0 + break + elif iteration >= max_iteration: + status = 3 + break + + if verbose > 0: + if status == 0: + print(f"Solved in {iteration} iterations, number of nodes {x.shape[0]}. \n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + elif status == 1: + print(f"Number of nodes is exceeded after iteration {iteration}. \n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + elif status == 2: + print("Singular Jacobian encountered when solving the collocation " + f"system on iteration {iteration}. \n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + elif status == 3: + print("The solver was unable to satisfy boundary conditions " + f"tolerance on iteration {iteration}. 
\n" + f"Maximum relative residual: {max_rms_res:.2e} \n" + f"Maximum boundary residual: {max_bc_res:.2e}") + + if p.size == 0: + p = None + + return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res, + niter=iteration, status=status, + message=TERMINATION_MESSAGES[status], success=status == 0) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_cubature.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_cubature.py new file mode 100644 index 0000000000000000000000000000000000000000..3e6d8911d13eeaa2420ef65a12e9b4ba34400ca0 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_cubature.py @@ -0,0 +1,728 @@ +import math +import heapq +import itertools + +from dataclasses import dataclass, field +from types import ModuleType +from typing import Any, TypeAlias + +from scipy._lib._array_api import ( + array_namespace, + xp_size, + xp_copy, + xp_broadcast_promote +) +from scipy._lib._util import MapWrapper + +from scipy.integrate._rules import ( + ProductNestedFixed, + GaussKronrodQuadrature, + GenzMalikCubature, +) +from scipy.integrate._rules._base import _split_subregion + +__all__ = ['cubature'] + +Array: TypeAlias = Any # To be changed to an array-api-typing Protocol later + + +@dataclass +class CubatureRegion: + estimate: Array + error: Array + a: Array + b: Array + _xp: ModuleType = field(repr=False) + + def __lt__(self, other): + # Consider regions with higher error estimates as being "less than" regions with + # lower order estimates, so that regions with high error estimates are placed at + # the top of the heap. 
+ + this_err = self._xp.max(self._xp.abs(self.error)) + other_err = self._xp.max(self._xp.abs(other.error)) + + return this_err > other_err + + +@dataclass +class CubatureResult: + estimate: Array + error: Array + status: str + regions: list[CubatureRegion] + subdivisions: int + atol: float + rtol: float + + +def cubature(f, a, b, *, rule="gk21", rtol=1e-8, atol=0, max_subdivisions=10000, + args=(), workers=1, points=None): + r""" + Adaptive cubature of multidimensional array-valued function. + + Given an arbitrary integration rule, this function returns an estimate of the + integral to the requested tolerance over the region defined by the arrays `a` and + `b` specifying the corners of a hypercube. + + Convergence is not guaranteed for all integrals. + + Parameters + ---------- + f : callable + Function to integrate. `f` must have the signature:: + + f(x : ndarray, *args) -> ndarray + + `f` should accept arrays ``x`` of shape:: + + (npoints, ndim) + + and output arrays of shape:: + + (npoints, output_dim_1, ..., output_dim_n) + + In this case, `cubature` will return arrays of shape:: + + (output_dim_1, ..., output_dim_n) + a, b : array_like + Lower and upper limits of integration as 1D arrays specifying the left and right + endpoints of the intervals being integrated over. Limits can be infinite. + rule : str, optional + Rule used to estimate the integral. If passing a string, the options are + "gauss-kronrod" (21 node), or "genz-malik" (degree 7). If a rule like + "gauss-kronrod" is specified for an ``n``-dim integrand, the corresponding + Cartesian product rule is used. "gk21", "gk15" are also supported for + compatibility with `quad_vec`. See Notes. + rtol, atol : float, optional + Relative and absolute tolerances. Iterations are performed until the error is + estimated to be less than ``atol + rtol * abs(est)``. Here `rtol` controls + relative accuracy (number of correct digits), while `atol` controls absolute + accuracy (number of correct decimal places). 
To achieve the desired `rtol`, set + `atol` to be smaller than the smallest value that can be expected from + ``rtol * abs(y)`` so that `rtol` dominates the allowable error. If `atol` is + larger than ``rtol * abs(y)`` the number of correct digits is not guaranteed. + Conversely, to achieve the desired `atol`, set `rtol` such that + ``rtol * abs(y)`` is always smaller than `atol`. Default values are 1e-8 for + `rtol` and 0 for `atol`. + max_subdivisions : int, optional + Upper bound on the number of subdivisions to perform. Default is 10,000. + args : tuple, optional + Additional positional args passed to `f`, if any. + workers : int or map-like callable, optional + If `workers` is an integer, part of the computation is done in parallel + subdivided to this many tasks (using :class:`python:multiprocessing.pool.Pool`). + Supply `-1` to use all cores available to the Process. Alternatively, supply a + map-like callable, such as :meth:`python:multiprocessing.pool.Pool.map` for + evaluating the population in parallel. This evaluation is carried out as + ``workers(func, iterable)``. + points : list of array_like, optional + List of points to avoid evaluating `f` at, under the condition that the rule + being used does not evaluate `f` on the boundary of a region (which is the + case for all Genz-Malik and Gauss-Kronrod rules). This can be useful if `f` has + a singularity at the specified point. This should be a list of array-likes where + each element has length ``ndim``. Default is empty. See Examples. + + Returns + ------- + res : object + Object containing the results of the estimation. It has the following + attributes: + + estimate : ndarray + Estimate of the value of the integral over the overall region specified. + error : ndarray + Estimate of the error of the approximation over the overall region + specified. + status : str + Whether the estimation was successful. Can be either: "converged", + "not_converged". 
+ subdivisions : int + Number of subdivisions performed. + atol, rtol : float + Requested tolerances for the approximation. + regions: list of object + List of objects containing the estimates of the integral over smaller + regions of the domain. + + Each object in ``regions`` has the following attributes: + + a, b : ndarray + Points describing the corners of the region. If the original integral + contained infinite limits or was over a region described by `region`, + then `a` and `b` are in the transformed coordinates. + estimate : ndarray + Estimate of the value of the integral over this region. + error : ndarray + Estimate of the error of the approximation over this region. + + Notes + ----- + The algorithm uses a similar algorithm to `quad_vec`, which itself is based on the + implementation of QUADPACK's DQAG* algorithms, implementing global error control and + adaptive subdivision. + + The source of the nodes and weights used for Gauss-Kronrod quadrature can be found + in [1]_, and the algorithm for calculating the nodes and weights in Genz-Malik + cubature can be found in [2]_. + + The rules currently supported via the `rule` argument are: + + - ``"gauss-kronrod"``, 21-node Gauss-Kronrod + - ``"genz-malik"``, n-node Genz-Malik + + If using Gauss-Kronrod for an ``n``-dim integrand where ``n > 2``, then the + corresponding Cartesian product rule will be found by taking the Cartesian product + of the nodes in the 1D case. This means that the number of nodes scales + exponentially as ``21^n`` in the Gauss-Kronrod case, which may be problematic in a + moderate number of dimensions. + + Genz-Malik is typically less accurate than Gauss-Kronrod but has much fewer nodes, + so in this situation using "genz-malik" might be preferable. + + Infinite limits are handled with an appropriate variable transformation. 
Assuming + ``a = [a_1, ..., a_n]`` and ``b = [b_1, ..., b_n]``: + + If :math:`a_i = -\infty` and :math:`b_i = \infty`, the i-th integration variable + will use the transformation :math:`x = \frac{1-|t|}{t}` and :math:`t \in (-1, 1)`. + + If :math:`a_i \ne \pm\infty` and :math:`b_i = \infty`, the i-th integration variable + will use the transformation :math:`x = a_i + \frac{1-t}{t}` and + :math:`t \in (0, 1)`. + + If :math:`a_i = -\infty` and :math:`b_i \ne \pm\infty`, the i-th integration + variable will use the transformation :math:`x = b_i - \frac{1-t}{t}` and + :math:`t \in (0, 1)`. + + References + ---------- + .. [1] R. Piessens, E. de Doncker, Quadpack: A Subroutine Package for Automatic + Integration, files: dqk21.f, dqk15.f (1983). + + .. [2] A.C. Genz, A.A. Malik, Remarks on algorithm 006: An adaptive algorithm for + numerical integration over an N-dimensional rectangular region, Journal of + Computational and Applied Mathematics, Volume 6, Issue 4, 1980, Pages 295-302, + ISSN 0377-0427 + :doi:`10.1016/0771-050X(80)90039-X` + + Examples + -------- + **1D integral with vector output**: + + .. math:: + + \int^1_0 \mathbf f(x) \text dx + + Where ``f(x) = x^n`` and ``n = np.arange(10)`` is a vector. Since no rule is + specified, the default "gk21" is used, which corresponds to Gauss-Kronrod + integration with 21 nodes. + + >>> import numpy as np + >>> from scipy.integrate import cubature + >>> def f(x, n): + ... # Make sure x and n are broadcastable + ... return x[:, np.newaxis]**n[np.newaxis, :] + >>> res = cubature( + ... f, + ... a=[0], + ... b=[1], + ... args=(np.arange(10),), + ... ) + >>> res.estimate + array([1. , 0.5 , 0.33333333, 0.25 , 0.2 , + 0.16666667, 0.14285714, 0.125 , 0.11111111, 0.1 ]) + + **7D integral with arbitrary-shaped array output**:: + + f(x) = cos(2*pi*r + alphas @ x) + + for some ``r`` and ``alphas``, and the integral is performed over the unit + hybercube, :math:`[0, 1]^7`. 
Since the integral is in a moderate number of + dimensions, "genz-malik" is used rather than the default "gauss-kronrod" to + avoid constructing a product rule with :math:`21^7 \approx 2 \times 10^9` nodes. + + >>> import numpy as np + >>> from scipy.integrate import cubature + >>> def f(x, r, alphas): + ... # f(x) = cos(2*pi*r + alphas @ x) + ... # Need to allow r and alphas to be arbitrary shape + ... npoints, ndim = x.shape[0], x.shape[-1] + ... alphas = alphas[np.newaxis, ...] + ... x = x.reshape(npoints, *([1]*(len(alphas.shape) - 1)), ndim) + ... return np.cos(2*np.pi*r + np.sum(alphas * x, axis=-1)) + >>> rng = np.random.default_rng() + >>> r, alphas = rng.random((2, 3)), rng.random((2, 3, 7)) + >>> res = cubature( + ... f=f, + ... a=np.array([0, 0, 0, 0, 0, 0, 0]), + ... b=np.array([1, 1, 1, 1, 1, 1, 1]), + ... rtol=1e-5, + ... rule="genz-malik", + ... args=(r, alphas), + ... ) + >>> res.estimate + array([[-0.79812452, 0.35246913, -0.52273628], + [ 0.88392779, 0.59139899, 0.41895111]]) + + **Parallel computation with** `workers`: + + >>> from concurrent.futures import ThreadPoolExecutor + >>> with ThreadPoolExecutor() as executor: + ... res = cubature( + ... f=f, + ... a=np.array([0, 0, 0, 0, 0, 0, 0]), + ... b=np.array([1, 1, 1, 1, 1, 1, 1]), + ... rtol=1e-5, + ... rule="genz-malik", + ... args=(r, alphas), + ... workers=executor.map, + ... ) + >>> res.estimate + array([[-0.79812452, 0.35246913, -0.52273628], + [ 0.88392779, 0.59139899, 0.41895111]]) + + **2D integral with infinite limits**: + + .. math:: + + \int^{ \infty }_{ -\infty } + \int^{ \infty }_{ -\infty } + e^{-x^2-y^2} + \text dy + \text dx + + >>> def gaussian(x): + ... return np.exp(-np.sum(x**2, axis=-1)) + >>> res = cubature(gaussian, [-np.inf, -np.inf], [np.inf, np.inf]) + >>> res.estimate + 3.1415926 + + **1D integral with singularities avoided using** `points`: + + .. 
math:: + + \int^{ 1 }_{ -1 } + \frac{\sin(x)}{x} + \text dx + + It is necessary to use the `points` parameter to avoid evaluating `f` at the origin. + + >>> def sinc(x): + ... return np.sin(x)/x + >>> res = cubature(sinc, [-1], [1], points=[[0]]) + >>> res.estimate + 1.8921661 + """ + + # It is also possible to use a custom rule, but this is not yet part of the public + # API. An example of this can be found in the class scipy.integrate._rules.Rule. + + xp = array_namespace(a, b) + max_subdivisions = float("inf") if max_subdivisions is None else max_subdivisions + points = [] if points is None else points + + # Convert a and b to arrays and convert each point in points to an array, promoting + # each to a common floating dtype. + a, b, *points = xp_broadcast_promote(a, b, *points, force_floating=True) + result_dtype = a.dtype + + if xp_size(a) == 0 or xp_size(b) == 0: + raise ValueError("`a` and `b` must be nonempty") + + if a.ndim != 1 or b.ndim != 1: + raise ValueError("`a` and `b` must be 1D arrays") + + # If the rule is a string, convert to a corresponding product rule + if isinstance(rule, str): + ndim = xp_size(a) + + if rule == "genz-malik": + rule = GenzMalikCubature(ndim, xp=xp) + else: + quadratues = { + "gauss-kronrod": GaussKronrodQuadrature(21, xp=xp), + + # Also allow names quad_vec uses: + "gk21": GaussKronrodQuadrature(21, xp=xp), + "gk15": GaussKronrodQuadrature(15, xp=xp), + } + + base_rule = quadratues.get(rule) + + if base_rule is None: + raise ValueError(f"unknown rule {rule}") + + rule = ProductNestedFixed([base_rule] * ndim) + + # If any of limits are the wrong way around (a > b), flip them and keep track of + # the sign. 
+ sign = (-1) ** xp.sum(xp.astype(a > b, xp.int8), dtype=result_dtype) + + a_flipped = xp.min(xp.stack([a, b]), axis=0) + b_flipped = xp.max(xp.stack([a, b]), axis=0) + + a, b = a_flipped, b_flipped + + # If any of the limits are infinite, apply a transformation + if xp.any(xp.isinf(a)) or xp.any(xp.isinf(b)): + f = _InfiniteLimitsTransform(f, a, b, xp=xp) + a, b = f.transformed_limits + + # Map points from the original coordinates to the new transformed coordinates. + # + # `points` is a list of arrays of shape (ndim,), but transformations are applied + # to arrays of shape (npoints, ndim). + # + # It is not possible to combine all the points into one array and then apply + # f.inv to all of them at once since `points` needs to remain iterable. + # Instead, each point is reshaped to an array of shape (1, ndim), `f.inv` is + # applied, and then each is reshaped back to (ndim,). + points = [xp.reshape(point, (1, -1)) for point in points] + points = [f.inv(point) for point in points] + points = [xp.reshape(point, (-1,)) for point in points] + + # Include any problematic points introduced by the transformation + points.extend(f.points) + + # If any problematic points are specified, divide the initial region so that these + # points lie on the edge of a subregion. + # + # This means ``f`` won't be evaluated there if the rule being used has no evaluation + # points on the boundary. 
+ if len(points) == 0: + initial_regions = [(a, b)] + else: + initial_regions = _split_region_at_points(a, b, points, xp) + + regions = [] + est = 0.0 + err = 0.0 + + for a_k, b_k in initial_regions: + est_k = rule.estimate(f, a_k, b_k, args) + err_k = rule.estimate_error(f, a_k, b_k, args) + regions.append(CubatureRegion(est_k, err_k, a_k, b_k, xp)) + + est += est_k + err += err_k + + subdivisions = 0 + success = True + + with MapWrapper(workers) as mapwrapper: + while xp.any(err > atol + rtol * xp.abs(est)): + # region_k is the region with highest estimated error + region_k = heapq.heappop(regions) + + est_k = region_k.estimate + err_k = region_k.error + + a_k, b_k = region_k.a, region_k.b + + # Subtract the estimate of the integral and its error over this region from + # the current global estimates, since these will be refined in the loop over + # all subregions. + est -= est_k + err -= err_k + + # Find all 2^ndim subregions formed by splitting region_k along each axis, + # e.g. for 1D integrals this splits an estimate over an interval into an + # estimate over two subintervals, for 3D integrals this splits an estimate + # over a cube into 8 subcubes. + # + # For each of the new subregions, calculate an estimate for the integral and + # the error there, and push these regions onto the heap for potential + # further subdividing. + + executor_args = zip( + itertools.repeat(f), + itertools.repeat(rule), + itertools.repeat(args), + _split_subregion(a_k, b_k, xp), + ) + + for subdivision_result in mapwrapper(_process_subregion, executor_args): + a_k_sub, b_k_sub, est_sub, err_sub = subdivision_result + + est += est_sub + err += err_sub + + new_region = CubatureRegion(est_sub, err_sub, a_k_sub, b_k_sub, xp) + + heapq.heappush(regions, new_region) + + subdivisions += 1 + + if subdivisions >= max_subdivisions: + success = False + break + + status = "converged" if success else "not_converged" + + # Apply sign change to handle any limits which were initially flipped. 
+ est = sign * est + + return CubatureResult( + estimate=est, + error=err, + status=status, + subdivisions=subdivisions, + regions=regions, + atol=atol, + rtol=rtol, + ) + + +def _process_subregion(data): + f, rule, args, coord = data + a_k_sub, b_k_sub = coord + + est_sub = rule.estimate(f, a_k_sub, b_k_sub, args) + err_sub = rule.estimate_error(f, a_k_sub, b_k_sub, args) + + return a_k_sub, b_k_sub, est_sub, err_sub + + +def _is_strictly_in_region(a, b, point, xp): + if xp.all(point == a) or xp.all(point == b): + return False + + return xp.all(a <= point) and xp.all(point <= b) + + +def _split_region_at_points(a, b, points, xp): + """ + Given the integration limits `a` and `b` describing a rectangular region and a list + of `points`, find the list of ``[(a_1, b_1), ..., (a_l, b_l)]`` which breaks up the + initial region into smaller subregion such that no `points` lie strictly inside + any of the subregions. + """ + + regions = [(a, b)] + + for point in points: + if xp.any(xp.isinf(point)): + # If a point is specified at infinity, ignore. + # + # This case occurs when points are given by the user to avoid, but after + # applying a transformation, they are removed. + continue + + new_subregions = [] + + for a_k, b_k in regions: + if _is_strictly_in_region(a_k, b_k, point, xp): + subregions = _split_subregion(a_k, b_k, xp, point) + + for left, right in subregions: + # Skip any zero-width regions. + if xp.any(left == right): + continue + else: + new_subregions.append((left, right)) + + new_subregions.extend(subregions) + + else: + new_subregions.append((a_k, b_k)) + + regions = new_subregions + + return regions + + +class _VariableTransform: + """ + A transformation that can be applied to an integral. + """ + + @property + def transformed_limits(self): + """ + New limits of integration after applying the transformation. + """ + + raise NotImplementedError + + @property + def points(self): + """ + Any problematic points introduced by the transformation. 
+ + These should be specified as points where ``_VariableTransform(f)(self, point)`` + would be problematic. + + For example, if the transformation ``x = 1/((1-t)(1+t))`` is applied to a + univariate integral, then points should return ``[ [1], [-1] ]``. + """ + + return [] + + def inv(self, x): + """ + Map points ``x`` to ``t`` such that if ``f`` is the original function and ``g`` + is the function after the transformation is applied, then:: + + f(x) = g(self.inv(x)) + """ + + raise NotImplementedError + + def __call__(self, t, *args, **kwargs): + """ + Apply the transformation to ``f`` and multiply by the Jacobian determinant. + This should be the new integrand after the transformation has been applied so + that the following is satisfied:: + + f_transformed = _VariableTransform(f) + + cubature(f, a, b) == cubature( + f_transformed, + *f_transformed.transformed_limits(a, b), + ) + """ + + raise NotImplementedError + + +class _InfiniteLimitsTransform(_VariableTransform): + r""" + Transformation for handling infinite limits. + + Assuming ``a = [a_1, ..., a_n]`` and ``b = [b_1, ..., b_n]``: + + If :math:`a_i = -\infty` and :math:`b_i = \infty`, the i-th integration variable + will use the transformation :math:`x = \frac{1-|t|}{t}` and :math:`t \in (-1, 1)`. + + If :math:`a_i \ne \pm\infty` and :math:`b_i = \infty`, the i-th integration variable + will use the transformation :math:`x = a_i + \frac{1-t}{t}` and + :math:`t \in (0, 1)`. + + If :math:`a_i = -\infty` and :math:`b_i \ne \pm\infty`, the i-th integration + variable will use the transformation :math:`x = b_i - \frac{1-t}{t}` and + :math:`t \in (0, 1)`. + """ + + def __init__(self, f, a, b, xp): + self._xp = xp + + self._f = f + self._orig_a = a + self._orig_b = b + + # (-oo, oo) will be mapped to (-1, 1). + self._double_inf_pos = (a == -math.inf) & (b == math.inf) + + # (start, oo) will be mapped to (0, 1). + start_inf_mask = (a != -math.inf) & (b == math.inf) + + # (-oo, end) will be mapped to (0, 1). 
+ inf_end_mask = (a == -math.inf) & (b != math.inf) + + # This is handled by making the transformation t = -x and reducing it to + # the other semi-infinite case. + self._semi_inf_pos = start_inf_mask | inf_end_mask + + # Since we flip the limits, we don't need to separately multiply the + # integrand by -1. + self._orig_a[inf_end_mask] = -b[inf_end_mask] + self._orig_b[inf_end_mask] = -a[inf_end_mask] + + self._num_inf = self._xp.sum( + self._xp.astype(self._double_inf_pos | self._semi_inf_pos, self._xp.int64), + ).__int__() + + @property + def transformed_limits(self): + a = xp_copy(self._orig_a) + b = xp_copy(self._orig_b) + + a[self._double_inf_pos] = -1 + b[self._double_inf_pos] = 1 + + a[self._semi_inf_pos] = 0 + b[self._semi_inf_pos] = 1 + + return a, b + + @property + def points(self): + # If there are infinite limits, then the origin becomes a problematic point + # due to a division by zero there. + + # If the function using this class only wraps f when a and b contain infinite + # limits, this condition will always be met (as is the case with cubature). + # + # If a and b do not contain infinite limits but f is still wrapped with this + # class, then without this condition the initial region of integration will + # be split around the origin unnecessarily. + if self._num_inf != 0: + return [self._xp.zeros(self._orig_a.shape)] + else: + return [] + + def inv(self, x): + t = xp_copy(x) + npoints = x.shape[0] + + double_inf_mask = self._xp.tile( + self._double_inf_pos[self._xp.newaxis, :], + (npoints, 1), + ) + + semi_inf_mask = self._xp.tile( + self._semi_inf_pos[self._xp.newaxis, :], + (npoints, 1), + ) + + # If any components of x are 0, then this component will be mapped to infinity + # under the transformation used for doubly-infinite limits. + # + # Handle the zero values and non-zero values separately to avoid division by + # zero. 
+ zero_mask = x[double_inf_mask] == 0 + non_zero_mask = double_inf_mask & ~zero_mask + t[zero_mask] = math.inf + t[non_zero_mask] = 1/(x[non_zero_mask] + self._xp.sign(x[non_zero_mask])) + + start = self._xp.tile(self._orig_a[self._semi_inf_pos], (npoints,)) + t[semi_inf_mask] = 1/(x[semi_inf_mask] - start + 1) + + return t + + def __call__(self, t, *args, **kwargs): + x = xp_copy(t) + npoints = t.shape[0] + + double_inf_mask = self._xp.tile( + self._double_inf_pos[self._xp.newaxis, :], + (npoints, 1), + ) + + semi_inf_mask = self._xp.tile( + self._semi_inf_pos[self._xp.newaxis, :], + (npoints, 1), + ) + + # For (-oo, oo) -> (-1, 1), use the transformation x = (1-|t|)/t. + x[double_inf_mask] = ( + (1 - self._xp.abs(t[double_inf_mask])) / t[double_inf_mask] + ) + + start = self._xp.tile(self._orig_a[self._semi_inf_pos], (npoints,)) + + # For (start, oo) -> (0, 1), use the transformation x = start + (1-t)/t. + x[semi_inf_mask] = start + (1 - t[semi_inf_mask]) / t[semi_inf_mask] + + jacobian_det = 1/self._xp.prod( + self._xp.reshape( + t[semi_inf_mask | double_inf_mask]**2, + (-1, self._num_inf), + ), + axis=-1, + ) + + f_x = self._f(x, *args, **kwargs) + jacobian_det = self._xp.reshape(jacobian_det, (-1, *([1]*(len(f_x.shape) - 1)))) + + return f_x * jacobian_det diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4694829833847eb7498a34b96ac0a679ee2e277d --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d2ae0102227169f215e6fcea4a55e39b893876371e913db713a6b426ddc1304 +size 116977 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py 
b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c8aaa36588651ae5e48b58fbb1d443bc71fc77 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py @@ -0,0 +1,8 @@ +"""Suite of ODE solvers implemented in Python.""" +from .ivp import solve_ivp +from .rk import RK23, RK45, DOP853 +from .radau import Radau +from .bdf import BDF +from .lsoda import LSODA +from .common import OdeSolution +from .base import DenseOutput, OdeSolver diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba4cc1f1b7e4e9b227a5862f8a27feed18385024 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f9679efe359f3816f2e553da3f068b24d11f680 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/base.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d00634afff4faf1716fab591c2284d76df2c50a Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/bdf.cpython-310.pyc differ diff --git 
a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fad27e7afd239265a979f1d4e6ef6510ec512ea4 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/common.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b53fcacf6f05395e62a87152ab4b880426b3bc8f Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/dop853_coefficients.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8def4431686f1c5494fae7e59d3379ad6b8fa07 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/ivp.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce2a43aafd70738df3ccf0a50ec7a01ccb883f8d Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/lsoda.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc 
b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1ee3bf943d294947c94ed6e61a91d68410bdfb4 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/radau.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d44de7355081cc5ff5e2f929068d76f7c138c06d Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/__pycache__/rk.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py new file mode 100644 index 0000000000000000000000000000000000000000..46db9a69dfb3e7aee5c150ac6795234cd455dfe5 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py @@ -0,0 +1,290 @@ +import numpy as np + + +def check_arguments(fun, y0, support_complex): + """Helper function for checking arguments common to all solvers.""" + y0 = np.asarray(y0) + if np.issubdtype(y0.dtype, np.complexfloating): + if not support_complex: + raise ValueError("`y0` is complex, but the chosen solver does " + "not support integration in a complex domain.") + dtype = complex + else: + dtype = float + y0 = y0.astype(dtype, copy=False) + + if y0.ndim != 1: + raise ValueError("`y0` must be 1-dimensional.") + + if not np.isfinite(y0).all(): + raise ValueError("All components of the initial state `y0` must be finite.") + + def fun_wrapped(t, y): + return np.asarray(fun(t, y), dtype=dtype) + + return fun_wrapped, y0 + + +class OdeSolver: + """Base class for ODE solvers. 
+ + In order to implement a new solver you need to follow the guidelines: + + 1. A constructor must accept parameters presented in the base class + (listed below) along with any other parameters specific to a solver. + 2. A constructor must accept arbitrary extraneous arguments + ``**extraneous``, but warn that these arguments are irrelevant + using `common.warn_extraneous` function. Do not pass these + arguments to the base class. + 3. A solver must implement a private method `_step_impl(self)` which + propagates a solver one step further. It must return tuple + ``(success, message)``, where ``success`` is a boolean indicating + whether a step was successful, and ``message`` is a string + containing description of a failure if a step failed or None + otherwise. + 4. A solver must implement a private method `_dense_output_impl(self)`, + which returns a `DenseOutput` object covering the last successful + step. + 5. A solver must have attributes listed below in Attributes section. + Note that ``t_old`` and ``step_size`` are updated automatically. + 6. Use `fun(self, t, y)` method for the system rhs evaluation, this + way the number of function evaluations (`nfev`) will be tracked + automatically. + 7. For convenience, a base class provides `fun_single(self, t, y)` and + `fun_vectorized(self, t, y)` for evaluating the rhs in + non-vectorized and vectorized fashions respectively (regardless of + how `fun` from the constructor is implemented). These calls don't + increment `nfev`. + 8. If a solver uses a Jacobian matrix and LU decompositions, it should + track the number of Jacobian evaluations (`njev`) and the number of + LU decompositions (`nlu`). + 9. By convention, the function evaluations used to compute a finite + difference approximation of the Jacobian should not be counted in + `nfev`, thus use `fun_single(self, t, y)` or + `fun_vectorized(self, t, y)` when computing a finite difference + approximation of the Jacobian. 
+ + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time --- the integration won't continue beyond it. It also + determines the direction of the integration. + vectorized : bool + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for other methods. It can also + result in slower overall execution for 'Radau' and 'BDF' in some + circumstances (e.g. small ``len(y0)``). + support_complex : bool, optional + Whether integration in a complex domain should be supported. + Generally determined by a derived solver class capabilities. + Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. 
None if no steps were made yet. + nfev : int + Number of the system's rhs evaluations. + njev : int + Number of the Jacobian evaluations. + nlu : int + Number of LU decompositions. + """ + TOO_SMALL_STEP = "Required step size is less than spacing between numbers." + + def __init__(self, fun, t0, y0, t_bound, vectorized, + support_complex=False): + self.t_old = None + self.t = t0 + self._fun, self.y = check_arguments(fun, y0, support_complex) + self.t_bound = t_bound + self.vectorized = vectorized + + if vectorized: + def fun_single(t, y): + return self._fun(t, y[:, None]).ravel() + fun_vectorized = self._fun + else: + fun_single = self._fun + + def fun_vectorized(t, y): + f = np.empty_like(y) + for i, yi in enumerate(y.T): + f[:, i] = self._fun(t, yi) + return f + + def fun(t, y): + self.nfev += 1 + return self.fun_single(t, y) + + self.fun = fun + self.fun_single = fun_single + self.fun_vectorized = fun_vectorized + + self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1 + self.n = self.y.size + self.status = 'running' + + self.nfev = 0 + self.njev = 0 + self.nlu = 0 + + @property + def step_size(self): + if self.t_old is None: + return None + else: + return np.abs(self.t - self.t_old) + + def step(self): + """Perform one integration step. + + Returns + ------- + message : string or None + Report from the solver. Typically a reason for a failure if + `self.status` is 'failed' after the step was taken or None + otherwise. + """ + if self.status != 'running': + raise RuntimeError("Attempt to step on a failed or finished " + "solver.") + + if self.n == 0 or self.t == self.t_bound: + # Handle corner cases of empty solver or no integration. 
+ self.t_old = self.t + self.t = self.t_bound + message = None + self.status = 'finished' + else: + t = self.t + success, message = self._step_impl() + + if not success: + self.status = 'failed' + else: + self.t_old = t + if self.direction * (self.t - self.t_bound) >= 0: + self.status = 'finished' + + return message + + def dense_output(self): + """Compute a local interpolant over the last successful step. + + Returns + ------- + sol : `DenseOutput` + Local interpolant over the last successful step. + """ + if self.t_old is None: + raise RuntimeError("Dense output is available after a successful " + "step was made.") + + if self.n == 0 or self.t == self.t_old: + # Handle corner cases of empty solver and no integration. + return ConstantDenseOutput(self.t_old, self.t, self.y) + else: + return self._dense_output_impl() + + def _step_impl(self): + raise NotImplementedError + + def _dense_output_impl(self): + raise NotImplementedError + + +class DenseOutput: + """Base class for local interpolant over step made by an ODE solver. + + It interpolates between `t_min` and `t_max` (see Attributes below). + Evaluation outside this interval is not forbidden, but the accuracy is not + guaranteed. + + Attributes + ---------- + t_min, t_max : float + Time range of the interpolation. + """ + def __init__(self, t_old, t): + self.t_old = t_old + self.t = t + self.t_min = min(t, t_old) + self.t_max = max(t, t_old) + + def __call__(self, t): + """Evaluate the interpolant. + + Parameters + ---------- + t : float or array_like with shape (n_points,) + Points to evaluate the solution at. + + Returns + ------- + y : ndarray, shape (n,) or (n, n_points) + Computed values. Shape depends on whether `t` was a scalar or a + 1-D array. 
+ """ + t = np.asarray(t) + if t.ndim > 1: + raise ValueError("`t` must be a float or a 1-D array.") + return self._call_impl(t) + + def _call_impl(self, t): + raise NotImplementedError + + +class ConstantDenseOutput(DenseOutput): + """Constant value interpolator. + + This class used for degenerate integration cases: equal integration limits + or a system with 0 equations. + """ + def __init__(self, t_old, t, value): + super().__init__(t_old, t) + self.value = value + + def _call_impl(self, t): + if t.ndim == 0: + return self.value + else: + ret = np.empty((self.value.shape[0], t.shape[0])) + ret[:] = self.value[:, None] + return ret diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py new file mode 100644 index 0000000000000000000000000000000000000000..33b47a642b976e623edc9047f6465e328095dcd2 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py @@ -0,0 +1,478 @@ +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import issparse, csc_matrix, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, EPS, num_jac, validate_first_step, + warn_extraneous) +from .base import OdeSolver, DenseOutput + + +MAX_ORDER = 5 +NEWTON_MAXITER = 4 +MIN_FACTOR = 0.2 +MAX_FACTOR = 10 + + +def compute_R(order, factor): + """Compute the matrix for changing the differences array.""" + I = np.arange(1, order + 1)[:, None] + J = np.arange(1, order + 1) + M = np.zeros((order + 1, order + 1)) + M[1:, 1:] = (I - 1 - factor * J) / I + M[0] = 1 + return np.cumprod(M, axis=0) + + +def change_D(D, order, factor): + """Change differences array in-place when step size is changed.""" + R = compute_R(order, factor) + U = compute_R(order, 1) + RU = R.dot(U) + D[:order + 1] = np.dot(RU.T, D[:order + 1]) + + +def 
solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol): + """Solve the algebraic system resulting from BDF method.""" + d = 0 + y = y_predict.copy() + dy_norm_old = None + converged = False + for k in range(NEWTON_MAXITER): + f = fun(t_new, y) + if not np.all(np.isfinite(f)): + break + + dy = solve_lu(LU, c * f - psi - d) + dy_norm = norm(dy / scale) + + if dy_norm_old is None: + rate = None + else: + rate = dy_norm / dy_norm_old + + if (rate is not None and (rate >= 1 or + rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)): + break + + y += dy + d += dy + + if (dy_norm == 0 or + rate is not None and rate / (1 - rate) * dy_norm < tol): + converged = True + break + + dy_norm_old = dy_norm + + return converged, k + 1, y, d + + +class BDF(OdeSolver): + """Implicit method based on backward-differentiation formulas. + + This is a variable order method with the order varying automatically from + 1 to 5. The general framework of the BDF algorithm is described in [1]_. + This class implements a quasi-constant step size as explained in [2]_. + The error estimation strategy for the constant-step BDF is derived in [3]_. + An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. 
+ max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to y, + required by this method. The Jacobian matrix has shape (n, n) and its + element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. 
+ jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [4]_. A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by this method, but may result in slower + execution overall in some circumstances (e.g. small ``len(y0)``). + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] G. D. Byrne, A. C. 
Hindmarsh, "A Polyalgorithm for the Numerical + Solution of Ordinary Differential Equations", ACM Transactions on + Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975. + .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. + COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. + .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I: + Nonstiff Problems", Sec. III.2. + .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step(self.fun, self.t, self.y, + t_bound, max_step, f, + self.direction, 1, + self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc', dtype=self.y.dtype) + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n, dtype=self.y.dtype) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0]) + self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 
1)))) + self.alpha = (1 - kappa) * self.gamma + self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2) + + D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype) + D[0] = self.y + D[1] = f * self.h_abs * self.direction + self.D = D + + self.order = 1 + self.n_equal_steps = 0 + self.LU = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y): + self.njev += 1 + f = self.fun_single(t, y) + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0) + elif callable(jac): + J = jac(t0, y0) + self.njev += 1 + if issparse(J): + J = csc_matrix(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=y0.dtype) + else: + J = np.asarray(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return np.asarray(jac(t, y), dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError(f"`jac` is expected to have shape {(self.n, self.n)}," + f" but actually has {J.shape}.") + else: + if issparse(jac): + J = csc_matrix(jac, dtype=y0.dtype) + else: + J = np.asarray(jac, dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError(f"`jac` is expected to have shape {(self.n, self.n)}," + f" but actually has {J.shape}.") + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + D = self.D + + max_step = self.max_step + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + change_D(D, self.order, max_step / self.h_abs) + self.n_equal_steps = 0 + elif self.h_abs < min_step: + h_abs = min_step + change_D(D, self.order, min_step / self.h_abs) + self.n_equal_steps = 0 + else: + h_abs = self.h_abs + + atol = 
self.atol + rtol = self.rtol + order = self.order + + alpha = self.alpha + gamma = self.gamma + error_const = self.error_const + + J = self.J + LU = self.LU + current_jac = self.jac is None + + step_accepted = False + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + change_D(D, order, np.abs(t_new - t) / h_abs) + self.n_equal_steps = 0 + LU = None + + h = t_new - t + h_abs = np.abs(h) + + y_predict = np.sum(D[:order + 1], axis=0) + + scale = atol + rtol * np.abs(y_predict) + psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order] + + converged = False + c = h / alpha[order] + while not converged: + if LU is None: + LU = self.lu(self.I - c * J) + + converged, n_iter, y_new, d = solve_bdf_system( + self.fun, t_new, y_predict, c, psi, LU, self.solve_lu, + scale, self.newton_tol) + + if not converged: + if current_jac: + break + J = self.jac(t_new, y_predict) + LU = None + current_jac = True + + if not converged: + factor = 0.5 + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + LU = None + continue + + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + scale = atol + rtol * np.abs(y_new) + error = error_const[order] * d + error_norm = norm(error / scale) + + if error_norm > 1: + factor = max(MIN_FACTOR, + safety * error_norm ** (-1 / (order + 1))) + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + # As we didn't have problems with convergence, we don't + # reset LU here. + else: + step_accepted = True + + self.n_equal_steps += 1 + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.J = J + self.LU = LU + + # Update differences. The principal relation here is + # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D + # contained difference for previous interpolating polynomial and + # d = D^{k + 1} y_n. 
Thus this elegant code follows. + D[order + 2] = d - D[order + 1] + D[order + 1] = d + for i in reversed(range(order + 1)): + D[i] += D[i + 1] + + if self.n_equal_steps < order + 1: + return True, None + + if order > 1: + error_m = error_const[order - 1] * D[order] + error_m_norm = norm(error_m / scale) + else: + error_m_norm = np.inf + + if order < MAX_ORDER: + error_p = error_const[order + 1] * D[order + 2] + error_p_norm = norm(error_p / scale) + else: + error_p_norm = np.inf + + error_norms = np.array([error_m_norm, error_norm, error_p_norm]) + with np.errstate(divide='ignore'): + factors = error_norms ** (-1 / np.arange(order, order + 3)) + + delta_order = np.argmax(factors) - 1 + order += delta_order + self.order = order + + factor = min(MAX_FACTOR, safety * np.max(factors)) + self.h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + self.LU = None + + return True, None + + def _dense_output_impl(self): + return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction, + self.order, self.D[:self.order + 1].copy()) + + +class BdfDenseOutput(DenseOutput): + def __init__(self, t_old, t, h, order, D): + super().__init__(t_old, t) + self.order = order + self.t_shift = self.t - h * np.arange(self.order) + self.denom = h * (1 + np.arange(self.order)) + self.D = D + + def _call_impl(self, t): + if t.ndim == 0: + x = (t - self.t_shift) / self.denom + p = np.cumprod(x) + else: + x = (t - self.t_shift[:, None]) / self.denom[:, None] + p = np.cumprod(x, axis=0) + + y = np.dot(self.D[1:].T, p) + if y.ndim == 1: + y += self.D[0] + else: + y += self.D[0, :, None] + + return y diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py new file mode 100644 index 0000000000000000000000000000000000000000..82f54bc0637bd8a69cb9e6f06ae56483462b46f6 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/common.py @@ -0,0 
+1,451 @@ +from itertools import groupby +from warnings import warn +import numpy as np +from scipy.sparse import find, coo_matrix + + +EPS = np.finfo(float).eps + + +def validate_first_step(first_step, t0, t_bound): + """Assert that first_step is valid and return it.""" + if first_step <= 0: + raise ValueError("`first_step` must be positive.") + if first_step > np.abs(t_bound - t0): + raise ValueError("`first_step` exceeds bounds.") + return first_step + + +def validate_max_step(max_step): + """Assert that max_Step is valid and return it.""" + if max_step <= 0: + raise ValueError("`max_step` must be positive.") + return max_step + + +def warn_extraneous(extraneous): + """Display a warning for extraneous keyword arguments. + + The initializer of each solver class is expected to collect keyword + arguments that it doesn't understand and warn about them. This function + prints a warning for each key in the supplied dictionary. + + Parameters + ---------- + extraneous : dict + Extraneous keyword arguments + """ + if extraneous: + warn("The following arguments have no effect for a chosen solver: " + f"{', '.join(f'`{x}`' for x in extraneous)}.", + stacklevel=3) + + +def validate_tol(rtol, atol, n): + """Validate tolerance values.""" + + if np.any(rtol < 100 * EPS): + warn("At least one element of `rtol` is too small. " + f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.", + stacklevel=3) + rtol = np.maximum(rtol, 100 * EPS) + + atol = np.asarray(atol) + if atol.ndim > 0 and atol.shape != (n,): + raise ValueError("`atol` has wrong shape.") + + if np.any(atol < 0): + raise ValueError("`atol` must be positive.") + + return rtol, atol + + +def norm(x): + """Compute RMS norm.""" + return np.linalg.norm(x) / x.size ** 0.5 + + +def select_initial_step(fun, t0, y0, t_bound, + max_step, f0, direction, order, rtol, atol): + """Empirically select a good initial step. + + The algorithm is described in [1]_. 
class OdeSolution:
    """Continuous ODE solution.

    It is organized as a collection of `DenseOutput` objects which represent
    local interpolants. It provides an algorithm to select a right interpolant
    for each given point.

    The interpolants cover the range between `t_min` and `t_max` (see
    Attributes below). Evaluation outside this interval is not forbidden, but
    the accuracy is not guaranteed.

    When evaluating at a breakpoint (one of the values in `ts`) a segment with
    the lower index is selected.

    Parameters
    ----------
    ts : array_like, shape (n_segments + 1,)
        Time instants between which local interpolants are defined. Must
        be strictly increasing or decreasing (zero segment with two points is
        also allowed).
    interpolants : list of DenseOutput with n_segments elements
        Local interpolants. An i-th interpolant is assumed to be defined
        between ``ts[i]`` and ``ts[i + 1]``.
    alt_segment : boolean
        Requests the alternative interpolant segment selection scheme. At each
        solver integration point, two interpolant segments are available. The
        default (False) and alternative (True) behaviours select the segment
        for which the requested time corresponded to ``t`` and ``t_old``,
        respectively. This functionality is only relevant for testing the
        interpolants' accuracy: different integrators use different
        construction strategies.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, ts, interpolants, alt_segment=False):
        ts = np.asarray(ts)
        d = np.diff(ts)
        # The first case covers integration on zero segment.
        if not ((ts.size == 2 and ts[0] == ts[-1])
                or np.all(d > 0) or np.all(d < 0)):
            raise ValueError("`ts` must be strictly increasing or decreasing.")

        self.n_segments = len(interpolants)
        if ts.shape != (self.n_segments + 1,):
            raise ValueError("Numbers of time stamps and interpolants "
                             "don't match.")

        self.ts = ts
        self.interpolants = interpolants
        # Normalize to an ascending `ts_sorted` so np.searchsorted can be
        # used in both integration directions; `side` encodes which of the
        # two candidate segments wins at an exact breakpoint.
        if ts[-1] >= ts[0]:
            self.t_min = ts[0]
            self.t_max = ts[-1]
            self.ascending = True
            self.side = "right" if alt_segment else "left"
            self.ts_sorted = ts
        else:
            self.t_min = ts[-1]
            self.t_max = ts[0]
            self.ascending = False
            self.side = "left" if alt_segment else "right"
            self.ts_sorted = ts[::-1]

    def _call_single(self, t):
        # Here we preserve a certain symmetry that when t is in self.ts,
        # if alt_segment=False, then we prioritize a segment with a lower
        # index.
        ind = np.searchsorted(self.ts_sorted, t, side=self.side)

        # Clip to a valid segment index; times outside [t_min, t_max] are
        # evaluated with the nearest (first or last) interpolant.
        segment = min(max(ind - 1, 0), self.n_segments - 1)
        if not self.ascending:
            segment = self.n_segments - 1 - segment

        return self.interpolants[segment](t)

    def __call__(self, t):
        """Evaluate the solution.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate at.

        Returns
        -------
        y : ndarray, shape (n_states,) or (n_states, n_points)
            Computed values. Shape depends on whether `t` is a scalar or a
            1-D array.
        """
        t = np.asarray(t)

        if t.ndim == 0:
            return self._call_single(t)

        # Sort the query times so each interpolant is called once with a
        # contiguous batch of points; `reverse` undoes the sort at the end.
        order = np.argsort(t)
        reverse = np.empty_like(order)
        reverse[order] = np.arange(order.shape[0])
        t_sorted = t[order]

        # See comment in self._call_single.
        segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side)
        segments -= 1
        segments[segments < 0] = 0
        segments[segments > self.n_segments - 1] = self.n_segments - 1
        if not self.ascending:
            segments = self.n_segments - 1 - segments

        # `segments` is monotone because `t_sorted` is sorted, so groupby
        # yields one contiguous run of points per interpolant.
        ys = []
        group_start = 0
        for segment, group in groupby(segments):
            group_end = group_start + len(list(group))
            y = self.interpolants[segment](t_sorted[group_start:group_end])
            ys.append(y)
            group_start = group_end

        ys = np.hstack(ys)
        ys = ys[:, reverse]

        return ys
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite differences Jacobian approximation tailored for ODE solvers.

    This function computes finite difference approximation to the Jacobian
    matrix of `fun` with respect to `y` using forward differences.
    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
    ``d f_i / d y_j``.

    A special feature of this function is the ability to correct the step
    size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error which
    approximately equals ``EPS * np.abs(f)``. It reduces a possibility of a
    huge error and assures that the estimated derivative are reasonably close
    to the true values (i.e., the finite difference approximation is at least
    qualitatively reflects the structure of the true Jacobian).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Threshold for `y` value used for computing the step size as
        ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
        absolute tolerance (atol) for a solver should be passed as `threshold`.
    factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian, `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = np.asarray(y)
    n = y.shape[0]
    if n == 0:
        return np.empty((0, 0)), factor

    if factor is None:
        factor = np.full(n, EPS ** 0.5)
    else:
        # Copy so the per-column adjustments below don't mutate the caller's
        # array.
        factor = factor.copy()

    # Direct the step as ODE dictates, hoping that such a step won't lead to
    # a problematic region. For complex ODEs it makes sense to use the real
    # part of f as we use steps along real axis.
    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
    y_scale = f_sign * np.maximum(threshold, np.abs(y))
    # The (y + d) - y form makes h the exactly-representable perturbation
    # that will actually be added to y.
    h = (y + factor * y_scale) - y

    # Make sure that the step is not 0 to start with. Not likely it will be
    # executed often.
    for i in np.nonzero(h == 0)[0]:
        while h[i] == 0:
            factor[i] *= 10
            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]

    # Dispatch on the presence of a sparsity structure.
    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
    else:
        structure, groups = sparsity
        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
                               structure, groups)
def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
    # Dense forward-difference Jacobian: perturb every component of y at
    # once by stacking the n perturbed states as columns and calling the
    # vectorized `fun` a single time.
    n = y.shape[0]
    h_vecs = np.diag(h)
    f_new = fun(t, y[:, None] + h_vecs)
    diff = f_new - f[:, None]
    # Per column, track the largest difference and the magnitude of f it is
    # compared against; these drive the step-size adaptation below.
    max_ind = np.argmax(np.abs(diff), axis=0)
    r = np.arange(n)
    max_diff = np.abs(diff[max_ind, r])
    scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        # Columns whose largest difference is at round-off level: retry once
        # with an enlarged step and keep whichever estimate has the larger
        # relative difference.
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_vecs[ind, ind] = h_new
        f_new = fun(t, y[:, None] + h_vecs[:, ind])
        diff_new = f_new - f[:, None]
        max_ind = np.argmax(np.abs(diff_new), axis=0)
        r = np.arange(ind.shape[0])
        max_diff_new = np.abs(diff_new[max_ind, r])
        scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            # Adopt the retried columns (factor, step, differences) in place.
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    # Divide each column by its step to turn differences into derivatives.
    diff /= h

    # Adapt per-column factors for the next call: grow steps whose difference
    # was small relative to f, shrink steps whose difference was large, and
    # never let the factor collapse below the floor.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    # Sparse forward-difference Jacobian: columns assigned to the same group
    # (structurally non-overlapping per `groups`) are perturbed together, so
    # only n_groups column evaluations of the vectorized `fun` are needed
    # instead of n.
    n = y.shape[0]
    n_groups = np.max(groups) + 1
    h_vecs = np.empty((n_groups, n))
    for group in range(n_groups):
        e = np.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T

    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]

    # Scatter the batched differences back onto the known sparsity pattern:
    # entry (i, j) comes from the evaluation of column group groups[j].
    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
    r = np.arange(n)
    max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
    scale = np.maximum(np.abs(f[max_ind]),
                       np.abs(f_new[max_ind, groups[r]]))

    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        # Columns whose largest difference is at round-off level: retry once
        # with an enlarged step. Only the groups containing an affected
        # column are re-evaluated; groups_map translates a group id to its
        # position in the reduced batch.
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = np.zeros(n)
        h_new_all[ind] = h_new

        groups_unique = np.unique(groups[ind])
        groups_map = np.empty(n_groups, dtype=int)
        h_vecs = np.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = np.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T

        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
                               (i, j)), shape=(n, ind.shape[0])).tocsc()

        max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
        r = np.arange(ind.shape[0])
        max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = np.maximum(
            np.abs(f[max_ind_new]),
            np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))

        # Keep the retried estimate only where it shows a larger relative
        # difference than the first attempt.
        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    # Divide each CSC column's stored values by that column's step, turning
    # differences into derivatives without densifying the matrix.
    diff.data /= np.repeat(h, np.diff(diff.indptr))

    # Same per-column step-factor adaptation as in the dense case.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
6.24110958716075717114429577812e-1 +A[8, 3] = -3.36089262944694129406857109825 +A[8, 4] = -8.68219346841726006818189891453e-1 +A[8, 5] = 2.75920996994467083049415600797e1 +A[8, 6] = 2.01540675504778934086186788979e1 +A[8, 7] = -4.34898841810699588477366255144e1 + +A[9, 0] = 4.77662536438264365890433908527e-1 +A[9, 3] = -2.48811461997166764192642586468 +A[9, 4] = -5.90290826836842996371446475743e-1 +A[9, 5] = 2.12300514481811942347288949897e1 +A[9, 6] = 1.52792336328824235832596922938e1 +A[9, 7] = -3.32882109689848629194453265587e1 +A[9, 8] = -2.03312017085086261358222928593e-2 + +A[10, 0] = -9.3714243008598732571704021658e-1 +A[10, 3] = 5.18637242884406370830023853209 +A[10, 4] = 1.09143734899672957818500254654 +A[10, 5] = -8.14978701074692612513997267357 +A[10, 6] = -1.85200656599969598641566180701e1 +A[10, 7] = 2.27394870993505042818970056734e1 +A[10, 8] = 2.49360555267965238987089396762 +A[10, 9] = -3.0467644718982195003823669022 + +A[11, 0] = 2.27331014751653820792359768449 +A[11, 3] = -1.05344954667372501984066689879e1 +A[11, 4] = -2.00087205822486249909675718444 +A[11, 5] = -1.79589318631187989172765950534e1 +A[11, 6] = 2.79488845294199600508499808837e1 +A[11, 7] = -2.85899827713502369474065508674 +A[11, 8] = -8.87285693353062954433549289258 +A[11, 9] = 1.23605671757943030647266201528e1 +A[11, 10] = 6.43392746015763530355970484046e-1 + +A[12, 0] = 5.42937341165687622380535766363e-2 +A[12, 5] = 4.45031289275240888144113950566 +A[12, 6] = 1.89151789931450038304281599044 +A[12, 7] = -5.8012039600105847814672114227 +A[12, 8] = 3.1116436695781989440891606237e-1 +A[12, 9] = -1.52160949662516078556178806805e-1 +A[12, 10] = 2.01365400804030348374776537501e-1 +A[12, 11] = 4.47106157277725905176885569043e-2 + +A[13, 0] = 5.61675022830479523392909219681e-2 +A[13, 6] = 2.53500210216624811088794765333e-1 +A[13, 7] = -2.46239037470802489917441475441e-1 +A[13, 8] = -1.24191423263816360469010140626e-1 +A[13, 9] = 1.5329179827876569731206322685e-1 +A[13, 10] = 
8.20105229563468988491666602057e-3 +A[13, 11] = 7.56789766054569976138603589584e-3 +A[13, 12] = -8.298e-3 + +A[14, 0] = 3.18346481635021405060768473261e-2 +A[14, 5] = 2.83009096723667755288322961402e-2 +A[14, 6] = 5.35419883074385676223797384372e-2 +A[14, 7] = -5.49237485713909884646569340306e-2 +A[14, 10] = -1.08347328697249322858509316994e-4 +A[14, 11] = 3.82571090835658412954920192323e-4 +A[14, 12] = -3.40465008687404560802977114492e-4 +A[14, 13] = 1.41312443674632500278074618366e-1 + +A[15, 0] = -4.28896301583791923408573538692e-1 +A[15, 5] = -4.69762141536116384314449447206 +A[15, 6] = 7.68342119606259904184240953878 +A[15, 7] = 4.06898981839711007970213554331 +A[15, 8] = 3.56727187455281109270669543021e-1 +A[15, 12] = -1.39902416515901462129418009734e-3 +A[15, 13] = 2.9475147891527723389556272149 +A[15, 14] = -9.15095847217987001081870187138 + + +B = A[N_STAGES, :N_STAGES] + +E3 = np.zeros(N_STAGES + 1) +E3[:-1] = B.copy() +E3[0] -= 0.244094488188976377952755905512 +E3[8] -= 0.733846688281611857341361741547 +E3[11] -= 0.220588235294117647058823529412e-1 + +E5 = np.zeros(N_STAGES + 1) +E5[0] = 0.1312004499419488073250102996e-1 +E5[5] = -0.1225156446376204440720569753e+1 +E5[6] = -0.4957589496572501915214079952 +E5[7] = 0.1664377182454986536961530415e+1 +E5[8] = -0.3503288487499736816886487290 +E5[9] = 0.3341791187130174790297318841 +E5[10] = 0.8192320648511571246570742613e-1 +E5[11] = -0.2235530786388629525884427845e-1 + +# First 3 coefficients are computed separately. 
+D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED)) +D[0, 0] = -0.84289382761090128651353491142e+1 +D[0, 5] = 0.56671495351937776962531783590 +D[0, 6] = -0.30689499459498916912797304727e+1 +D[0, 7] = 0.23846676565120698287728149680e+1 +D[0, 8] = 0.21170345824450282767155149946e+1 +D[0, 9] = -0.87139158377797299206789907490 +D[0, 10] = 0.22404374302607882758541771650e+1 +D[0, 11] = 0.63157877876946881815570249290 +D[0, 12] = -0.88990336451333310820698117400e-1 +D[0, 13] = 0.18148505520854727256656404962e+2 +D[0, 14] = -0.91946323924783554000451984436e+1 +D[0, 15] = -0.44360363875948939664310572000e+1 + +D[1, 0] = 0.10427508642579134603413151009e+2 +D[1, 5] = 0.24228349177525818288430175319e+3 +D[1, 6] = 0.16520045171727028198505394887e+3 +D[1, 7] = -0.37454675472269020279518312152e+3 +D[1, 8] = -0.22113666853125306036270938578e+2 +D[1, 9] = 0.77334326684722638389603898808e+1 +D[1, 10] = -0.30674084731089398182061213626e+2 +D[1, 11] = -0.93321305264302278729567221706e+1 +D[1, 12] = 0.15697238121770843886131091075e+2 +D[1, 13] = -0.31139403219565177677282850411e+2 +D[1, 14] = -0.93529243588444783865713862664e+1 +D[1, 15] = 0.35816841486394083752465898540e+2 + +D[2, 0] = 0.19985053242002433820987653617e+2 +D[2, 5] = -0.38703730874935176555105901742e+3 +D[2, 6] = -0.18917813819516756882830838328e+3 +D[2, 7] = 0.52780815920542364900561016686e+3 +D[2, 8] = -0.11573902539959630126141871134e+2 +D[2, 9] = 0.68812326946963000169666922661e+1 +D[2, 10] = -0.10006050966910838403183860980e+1 +D[2, 11] = 0.77771377980534432092869265740 +D[2, 12] = -0.27782057523535084065932004339e+1 +D[2, 13] = -0.60196695231264120758267380846e+2 +D[2, 14] = 0.84320405506677161018159903784e+2 +D[2, 15] = 0.11992291136182789328035130030e+2 + +D[3, 0] = -0.25693933462703749003312586129e+2 +D[3, 5] = -0.15418974869023643374053993627e+3 +D[3, 6] = -0.23152937917604549567536039109e+3 +D[3, 7] = 0.35763911791061412378285349910e+3 +D[3, 8] = 0.93405324183624310003907691704e+2 +D[3, 9] = 
def prepare_events(events):
    """Standardize event functions and extract attributes."""
    # A single callable is treated as a one-element collection.
    if callable(events):
        events = (events,)

    n_events = len(events)
    max_events = np.empty(n_events)
    direction = np.empty(n_events)
    bad_terminal = ('The `terminal` attribute of each event '
                    'must be a boolean or positive integer.')

    for i, event in enumerate(events):
        direction[i] = getattr(event, 'direction', 0)
        terminal = getattr(event, 'terminal', None)

        # Missing/zero `terminal` means the event never terminates the
        # integration; a positive integer (True counts as 1) caps the
        # number of occurrences.
        if terminal is None or terminal == 0:
            max_events[i] = np.inf
            continue
        if int(terminal) == terminal and terminal > 0:
            max_events[i] = terminal
            continue
        raise ValueError(bad_terminal)

    return events, max_events, direction
def solve_event_equation(event, sol, t_old, t):
    """Find the time at which an ODE event function crosses zero.

    Solves ``event(t, y(t)) = 0`` where ``y(t)`` is supplied by the
    solver's dense-output interpolant. The root is bracketed by
    [`t_old`, `t`] and located with `scipy.optimize.brentq` using
    ``xtol = rtol = 4 * EPS``.

    Parameters
    ----------
    event : callable
        Function ``event(t, y)``.
    sol : callable
        Function ``sol(t)`` which evaluates an ODE solution between `t_old`
        and `t`.
    t_old, t : float
        Previous and new values of time. They will be used as a bracketing
        interval.

    Returns
    -------
    root : float
        Found solution.
    """
    from scipy.optimize import brentq

    def residual(time):
        return event(time, sol(time))

    return brentq(residual, t_old, t, xtol=4 * EPS, rtol=4 * EPS)
+ """ + roots = [solve_event_equation(events[event_index], sol, t_old, t) + for event_index in active_events] + + roots = np.asarray(roots) + + if np.any(event_count[active_events] >= max_events[active_events]): + if t > t_old: + order = np.argsort(roots) + else: + order = np.argsort(-roots) + active_events = active_events[order] + roots = roots[order] + t = np.nonzero(event_count[active_events] + >= max_events[active_events])[0][0] + active_events = active_events[:t + 1] + roots = roots[:t + 1] + terminate = True + else: + terminate = False + + return active_events, roots, terminate + + +def find_active_events(g, g_new, direction): + """Find which event occurred during an integration step. + + Parameters + ---------- + g, g_new : array_like, shape (n_events,) + Values of event functions at a current and next points. + direction : ndarray, shape (n_events,) + Event "direction" according to the definition in `solve_ivp`. + + Returns + ------- + active_events : ndarray + Indices of events which occurred during the step. + """ + g, g_new = np.asarray(g), np.asarray(g_new) + up = (g <= 0) & (g_new >= 0) + down = (g >= 0) & (g_new <= 0) + either = up | down + mask = (up & (direction > 0) | + down & (direction < 0) | + either & (direction == 0)) + + return np.nonzero(mask)[0] + + +def solve_ivp(fun, t_span, y0, method='RK45', t_eval=None, dense_output=False, + events=None, vectorized=False, args=None, **options): + """Solve an initial value problem for a system of ODEs. + + This function numerically integrates a system of ordinary differential + equations given an initial value:: + + dy / dt = f(t, y) + y(t0) = y0 + + Here t is a 1-D independent variable (time), y(t) is an + N-D vector-valued function (state), and an N-D + vector-valued function f(t, y) determines the differential equations. + The goal is to find y(t) approximately satisfying the differential + equations, given an initial value y(t0)=y0. 
+ + Some of the solvers support integration in the complex domain, but note + that for stiff ODE solvers, the right-hand side must be + complex-differentiable (satisfy Cauchy-Riemann equations [11]_). + To solve a problem in the complex domain, pass y0 with a complex data type. + Another option always available is to rewrite your problem for real and + imaginary parts separately. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. Additional + arguments need to be passed if ``args`` is used (see documentation of + ``args`` argument). ``fun`` must return an array of the same shape as + ``y``. See `vectorized` for more information. + t_span : 2-member sequence + Interval of integration (t0, tf). The solver starts with t=t0 and + integrates until it reaches t=tf. Both t0 and tf must be floats + or values interpretable by the float conversion function. + y0 : array_like, shape (n,) + Initial state. For problems in the complex domain, pass `y0` with a + complex data type (even if the initial value is purely real). + method : string or `OdeSolver`, optional + Integration method to use: + + * 'RK45' (default): Explicit Runge-Kutta method of order 5(4) [1]_. + The error is controlled assuming accuracy of the fourth-order + method, but steps are taken using the fifth-order accurate + formula (local extrapolation is done). A quartic interpolation + polynomial is used for the dense output [2]_. Can be applied in + the complex domain. + * 'RK23': Explicit Runge-Kutta method of order 3(2) [3]_. The error + is controlled assuming accuracy of the second-order method, but + steps are taken using the third-order accurate formula (local + extrapolation is done). A cubic Hermite polynomial is used for the + dense output. Can be applied in the complex domain. 
+ * 'DOP853': Explicit Runge-Kutta method of order 8 [13]_. + Python implementation of the "DOP853" algorithm originally + written in Fortran [14]_. A 7-th order interpolation polynomial + accurate to 7-th order is used for the dense output. + Can be applied in the complex domain. + * 'Radau': Implicit Runge-Kutta method of the Radau IIA family of + order 5 [4]_. The error is controlled with a third-order accurate + embedded formula. A cubic polynomial which satisfies the + collocation conditions is used for the dense output. + * 'BDF': Implicit multi-step variable-order (1 to 5) method based + on a backward differentiation formula for the derivative + approximation [5]_. The implementation follows the one described + in [6]_. A quasi-constant step scheme is used and accuracy is + enhanced using the NDF modification. Can be applied in the + complex domain. + * 'LSODA': Adams/BDF method with automatic stiffness detection and + switching [7]_, [8]_. This is a wrapper of the Fortran solver + from ODEPACK. + + Explicit Runge-Kutta methods ('RK23', 'RK45', 'DOP853') should be used + for non-stiff problems and implicit methods ('Radau', 'BDF') for + stiff problems [9]_. Among Runge-Kutta methods, 'DOP853' is recommended + for solving with high precision (low values of `rtol` and `atol`). + + If not sure, first try to run 'RK45'. If it makes unusually many + iterations, diverges, or fails, your problem is likely to be stiff and + you should use 'Radau' or 'BDF'. 'LSODA' can also be a good universal + choice, but it might be somewhat less convenient to work with as it + wraps old Fortran code. + + You can also pass an arbitrary class derived from `OdeSolver` which + implements the solver. + t_eval : array_like or None, optional + Times at which to store the computed solution, must be sorted and lie + within `t_span`. If None (default), use points selected by the solver. + dense_output : bool, optional + Whether to compute a continuous solution. Default is False. 
+ events : callable, or list of callables, optional + Events to track. If None (default), no events will be tracked. + Each event occurs at the zeros of a continuous function of time and + state. Each function must have the signature ``event(t, y)`` where + additional argument have to be passed if ``args`` is used (see + documentation of ``args`` argument). Each function must return a + float. The solver will find an accurate value of `t` at which + ``event(t, y(t)) = 0`` using a root-finding algorithm. By default, + all zeros will be found. The solver looks for a sign change over + each step, so if multiple zero crossings occur within one step, + events may be missed. Additionally each `event` function might + have the following attributes: + + terminal: bool or int, optional + When boolean, whether to terminate integration if this event occurs. + When integral, termination occurs after the specified the number of + occurrences of this event. + Implicitly False if not assigned. + direction: float, optional + Direction of a zero crossing. If `direction` is positive, + `event` will only trigger when going from negative to positive, + and vice versa if `direction` is negative. If 0, then either + direction will trigger event. Implicitly 0 if not assigned. + + You can assign attributes like ``event.terminal = True`` to any + function in Python. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). 
+ + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for other methods and for 'Radau' and + 'BDF' in some circumstances (e.g. small ``len(y0)``). + args : tuple, optional + Additional arguments to pass to the user-defined functions. If given, + the additional arguments are passed to all user-defined functions. + So if, for example, `fun` has the signature ``fun(t, y, a, b, c)``, + then `jac` (if given) and any event functions must have the same + signature, and `args` must be a tuple of length 3. + **options + Options passed to a chosen solver. All options available for already + implemented solvers are listed below. + first_step : float or None, optional + Initial step size. Default is `None` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float or array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. 
+ jac : array_like, sparse_matrix, callable or None, optional + Jacobian matrix of the right-hand side of the system with respect + to y, required by the 'Radau', 'BDF' and 'LSODA' method. The + Jacobian matrix has shape (n, n) and its element (i, j) is equal to + ``d f_i / d y_j``. There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. Not supported by 'LSODA'. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)``, as necessary. + Additional arguments have to be passed if ``args`` is + used (see documentation of ``args`` argument). + For 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : array_like, sparse matrix or None, optional + Defines a sparsity structure of the Jacobian matrix for a finite- + difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few + non-zero elements in *each* row, providing the sparsity structure + will greatly speed up the computations [10]_. A zero entry means that + a corresponding element in the Jacobian is always zero. If None + (default), the Jacobian is assumed to be dense. + Not supported by 'LSODA', see `lband` and `uband` instead. + lband, uband : int or None, optional + Parameters defining the bandwidth of the Jacobian for the 'LSODA' + method, i.e., ``jac[i, j] != 0 only for i - lband <= j <= i + uband``. + Default is None. Setting these requires your jac routine to return the + Jacobian in the packed format: the returned array must have ``n`` + columns and ``uband + lband + 1`` rows in which Jacobian diagonals are + written. Specifically ``jac_packed[uband + i - j , j] = jac[i, j]``. 
+ The same format is used in `scipy.linalg.solve_banded` (check for an + illustration). These parameters can be also used with ``jac=None`` to + reduce the number of Jacobian elements estimated by finite differences. + min_step : float, optional + The minimum allowed step size for 'LSODA' method. + By default `min_step` is zero. + + Returns + ------- + Bunch object with the following fields defined: + t : ndarray, shape (n_points,) + Time points. + y : ndarray, shape (n, n_points) + Values of the solution at `t`. + sol : `OdeSolution` or None + Found solution as `OdeSolution` instance; None if `dense_output` was + set to False. + t_events : list of ndarray or None + Contains for each event type a list of arrays at which an event of + that type event was detected. None if `events` was None. + y_events : list of ndarray or None + For each value of `t_events`, the corresponding value of the solution. + None if `events` was None. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + status : int + Reason for algorithm termination: + + * -1: Integration step failed. + * 0: The solver successfully reached the end of `tspan`. + * 1: A termination event occurred. + + message : string + Human-readable description of the termination reason. + success : bool + True if the solver reached the interval end or a termination event + occurred (``status >= 0``). + + References + ---------- + .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta + formulae", Journal of Computational and Applied Mathematics, Vol. 6, + No. 1, pp. 19-26, 1980. + .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics + of Computation,, Vol. 46, No. 173, pp. 135-150, 1986. + .. [3] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas", + Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989. + .. [4] E. Hairer, G. 
Wanner, "Solving Ordinary Differential Equations II: + Stiff and Differential-Algebraic Problems", Sec. IV.8. + .. [5] `Backward Differentiation Formula + `_ + on Wikipedia. + .. [6] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. + COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. + .. [7] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE + Solvers," IMACS Transactions on Scientific Computation, Vol 1., + pp. 55-64, 1983. + .. [8] L. Petzold, "Automatic selection of methods for solving stiff and + nonstiff systems of ordinary differential equations", SIAM Journal + on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148, + 1983. + .. [9] `Stiff equation `_ on + Wikipedia. + .. [10] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + .. [11] `Cauchy-Riemann equations + `_ on + Wikipedia. + .. [12] `Lotka-Volterra equations + `_ + on Wikipedia. + .. [13] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations I: Nonstiff Problems", Sec. II. + .. [14] `Page with original Fortran code of DOP853 + `_. + + Examples + -------- + Basic exponential decay showing automatically chosen time points. + + >>> import numpy as np + >>> from scipy.integrate import solve_ivp + >>> def exponential_decay(t, y): return -0.5 * y + >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8]) + >>> print(sol.t) + [ 0. 0.11487653 1.26364188 3.06061781 4.81611105 6.57445806 + 8.33328988 10. ] + >>> print(sol.y) + [[2. 1.88836035 1.06327177 0.43319312 0.18017253 0.07483045 + 0.03107158 0.01350781] + [4. 3.7767207 2.12654355 0.86638624 0.36034507 0.14966091 + 0.06214316 0.02701561] + [8. 7.5534414 4.25308709 1.73277247 0.72069014 0.29932181 + 0.12428631 0.05403123]] + + Specifying points where the solution is desired. + + >>> sol = solve_ivp(exponential_decay, [0, 10], [2, 4, 8], + ... 
t_eval=[0, 1, 2, 4, 10]) + >>> print(sol.t) + [ 0 1 2 4 10] + >>> print(sol.y) + [[2. 1.21305369 0.73534021 0.27066736 0.01350938] + [4. 2.42610739 1.47068043 0.54133472 0.02701876] + [8. 4.85221478 2.94136085 1.08266944 0.05403753]] + + Cannon fired upward with terminal event upon impact. The ``terminal`` and + ``direction`` fields of an event are applied by monkey patching a function. + Here ``y[0]`` is position and ``y[1]`` is velocity. The projectile starts + at position 0 with velocity +10. Note that the integration never reaches + t=100 because the event is terminal. + + >>> def upward_cannon(t, y): return [y[1], -0.5] + >>> def hit_ground(t, y): return y[0] + >>> hit_ground.terminal = True + >>> hit_ground.direction = -1 + >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], events=hit_ground) + >>> print(sol.t_events) + [array([40.])] + >>> print(sol.t) + [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02 + 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01] + + Use `dense_output` and `events` to find position, which is 100, at the apex + of the cannonball's trajectory. Apex is not defined as terminal, so both + apex and hit_ground are found. There is no information at t=20, so the sol + attribute is used to evaluate the solution. The sol attribute is returned + by setting ``dense_output=True``. Alternatively, the `y_events` attribute + can be used to access the solution at the time of the event. + + >>> def apex(t, y): return y[1] + >>> sol = solve_ivp(upward_cannon, [0, 100], [0, 10], + ... events=(hit_ground, apex), dense_output=True) + >>> print(sol.t_events) + [array([40.]), array([20.])] + >>> print(sol.t) + [0.00000000e+00 9.99900010e-05 1.09989001e-03 1.10988901e-02 + 1.11088891e-01 1.11098890e+00 1.11099890e+01 4.00000000e+01] + >>> print(sol.sol(sol.t_events[1][0])) + [100. 0.] 
+ >>> print(sol.y_events) + [array([[-5.68434189e-14, -1.00000000e+01]]), + array([[1.00000000e+02, 1.77635684e-15]])] + + As an example of a system with additional parameters, we'll implement + the Lotka-Volterra equations [12]_. + + >>> def lotkavolterra(t, z, a, b, c, d): + ... x, y = z + ... return [a*x - b*x*y, -c*y + d*x*y] + ... + + We pass in the parameter values a=1.5, b=1, c=3 and d=1 with the `args` + argument. + + >>> sol = solve_ivp(lotkavolterra, [0, 15], [10, 5], args=(1.5, 1, 3, 1), + ... dense_output=True) + + Compute a dense solution and plot it. + + >>> t = np.linspace(0, 15, 300) + >>> z = sol.sol(t) + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, z.T) + >>> plt.xlabel('t') + >>> plt.legend(['x', 'y'], shadow=True) + >>> plt.title('Lotka-Volterra System') + >>> plt.show() + + A couple examples of using solve_ivp to solve the differential + equation ``y' = Ay`` with complex matrix ``A``. + + >>> A = np.array([[-0.25 + 0.14j, 0, 0.33 + 0.44j], + ... [0.25 + 0.58j, -0.2 + 0.14j, 0], + ... [0, 0.2 + 0.4j, -0.1 + 0.97j]]) + + Solving an IVP with ``A`` from above and ``y`` as 3x1 vector: + + >>> def deriv_vec(t, y): + ... return A @ y + >>> result = solve_ivp(deriv_vec, [0, 25], + ... np.array([10 + 0j, 20 + 0j, 30 + 0j]), + ... t_eval=np.linspace(0, 25, 101)) + >>> print(result.y[:, 0]) + [10.+0.j 20.+0.j 30.+0.j] + >>> print(result.y[:, -1]) + [18.46291039+45.25653651j 10.01569306+36.23293216j + -4.98662741+80.07360388j] + + Solving an IVP with ``A`` from above with ``y`` as 3x3 matrix : + + >>> def deriv_mat(t, y): + ... return (A @ y.reshape(3, 3)).flatten() + >>> y0 = np.array([[2 + 0j, 3 + 0j, 4 + 0j], + ... [5 + 0j, 6 + 0j, 7 + 0j], + ... [9 + 0j, 34 + 0j, 78 + 0j]]) + + >>> result = solve_ivp(deriv_mat, [0, 25], y0.flatten(), + ... 
t_eval=np.linspace(0, 25, 101)) + >>> print(result.y[:, 0].reshape(3, 3)) + [[ 2.+0.j 3.+0.j 4.+0.j] + [ 5.+0.j 6.+0.j 7.+0.j] + [ 9.+0.j 34.+0.j 78.+0.j]] + >>> print(result.y[:, -1].reshape(3, 3)) + [[ 5.67451179 +12.07938445j 17.2888073 +31.03278837j + 37.83405768 +63.25138759j] + [ 3.39949503 +11.82123994j 21.32530996 +44.88668871j + 53.17531184+103.80400411j] + [ -2.26105874 +22.19277664j -15.1255713 +70.19616341j + -38.34616845+153.29039931j]] + + + """ + if method not in METHODS and not ( + inspect.isclass(method) and issubclass(method, OdeSolver)): + raise ValueError(f"`method` must be one of {METHODS} or OdeSolver class.") + + t0, tf = map(float, t_span) + + if args is not None: + # Wrap the user's fun (and jac, if given) in lambdas to hide the + # additional parameters. Pass in the original fun as a keyword + # argument to keep it in the scope of the lambda. + try: + _ = [*(args)] + except TypeError as exp: + suggestion_tuple = ( + "Supplied 'args' cannot be unpacked. Please supply `args`" + f" as a tuple (e.g. `args=({args},)`)" + ) + raise TypeError(suggestion_tuple) from exp + + def fun(t, x, fun=fun): + return fun(t, x, *args) + jac = options.get('jac') + if callable(jac): + options['jac'] = lambda t, x: jac(t, x, *args) + + if t_eval is not None: + t_eval = np.asarray(t_eval) + if t_eval.ndim != 1: + raise ValueError("`t_eval` must be 1-dimensional.") + + if np.any(t_eval < min(t0, tf)) or np.any(t_eval > max(t0, tf)): + raise ValueError("Values in `t_eval` are not within `t_span`.") + + d = np.diff(t_eval) + if tf > t0 and np.any(d <= 0) or tf < t0 and np.any(d >= 0): + raise ValueError("Values in `t_eval` are not properly sorted.") + + if tf > t0: + t_eval_i = 0 + else: + # Make order of t_eval decreasing to use np.searchsorted. + t_eval = t_eval[::-1] + # This will be an upper bound for slices. 
+ t_eval_i = t_eval.shape[0] + + if method in METHODS: + method = METHODS[method] + + solver = method(fun, t0, y0, tf, vectorized=vectorized, **options) + + if t_eval is None: + ts = [t0] + ys = [y0] + elif t_eval is not None and dense_output: + ts = [] + ti = [t0] + ys = [] + else: + ts = [] + ys = [] + + interpolants = [] + + if events is not None: + events, max_events, event_dir = prepare_events(events) + event_count = np.zeros(len(events)) + if args is not None: + # Wrap user functions in lambdas to hide the additional parameters. + # The original event function is passed as a keyword argument to the + # lambda to keep the original function in scope (i.e., avoid the + # late binding closure "gotcha"). + events = [lambda t, x, event=event: event(t, x, *args) + for event in events] + g = [event(t0, y0) for event in events] + t_events = [[] for _ in range(len(events))] + y_events = [[] for _ in range(len(events))] + else: + t_events = None + y_events = None + + status = None + while status is None: + message = solver.step() + + if solver.status == 'finished': + status = 0 + elif solver.status == 'failed': + status = -1 + break + + t_old = solver.t_old + t = solver.t + y = solver.y + + if dense_output: + sol = solver.dense_output() + interpolants.append(sol) + else: + sol = None + + if events is not None: + g_new = [event(t, y) for event in events] + active_events = find_active_events(g, g_new, event_dir) + if active_events.size > 0: + if sol is None: + sol = solver.dense_output() + + event_count[active_events] += 1 + root_indices, roots, terminate = handle_events( + sol, events, active_events, event_count, max_events, + t_old, t) + + for e, te in zip(root_indices, roots): + t_events[e].append(te) + y_events[e].append(sol(te)) + + if terminate: + status = 1 + t = roots[-1] + y = sol(t) + + g = g_new + + if t_eval is None: + ts.append(t) + ys.append(y) + else: + # The value in t_eval equal to t will be included. 
import numpy as np
from scipy.integrate import ode
from .common import validate_tol, validate_first_step, warn_extraneous
from .base import OdeSolver, DenseOutput


class LSODA(OdeSolver):
    """Adams/BDF method with automatic stiffness detection and switching.

    This is a wrapper to the Fortran solver from ODEPACK [1]_. It switches
    automatically between the nonstiff Adams method and the stiff BDF method.
    The method was originally detailed in [2]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. Called as ``fun(t, y)`` where ``t`` is a scalar and
        ``y`` an ndarray with ``len(y) = len(y0)``; must return an array of
        the same shape as ``y``.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None``, which lets LSODA choose.
    min_step : float, optional
        Minimum allowed step size. Default is 0.0, i.e., not bounded.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., not bounded.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. `atol` may be an
        array of shape (n,) to give per-component absolute tolerances.
        Default values are 1e-3 for `rtol` and 1e-6 for `atol`.
    jac : None or callable, optional
        Jacobian of the right-hand side with respect to ``y``, shape (n, n),
        element (i, j) equal to ``d f_i / d y_j``; called as ``jac(t, y)``.
        If None (default), it is approximated by finite differences. It is
        generally recommended to provide the Jacobian rather than relying on
        a finite-difference approximation.
    lband, uband : int or None
        Bandwidth of the Jacobian: ``jac[i, j] != 0 only for
        i - lband <= j <= i + uband``. When set, a user-supplied ``jac``
        must return the Jacobian in the packed banded format used by
        `scipy.linalg.solve_banded`: ``n`` columns, ``uband + lband + 1``
        rows, with ``jac_packed[uband + i - j, j] = jac[i, j]``. These may
        also be used with ``jac=None`` to cheapen the finite-difference
        estimate.
    vectorized : bool, optional
        Whether `fun` may be called in a vectorized fashion. False (default)
        is recommended for this solver; vectorization only speeds up the
        Jacobian estimation of 'Radau' and 'BDF' and slows this solver down.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.

    References
    ----------
    .. [1] A. C. Hindmarsh, "ODEPACK, A Systematized Collection of ODE
           Solvers," IMACS Transactions on Scientific Computation, Vol 1.,
           pp. 55-64, 1983.
    .. [2] L. Petzold, "Automatic selection of methods for solving stiff and
           nonstiff systems of ordinary differential equations", SIAM Journal
           on Scientific and Statistical Computing, Vol. 4, No. 1, pp. 136-148,
           1983.
    """
    def __init__(self, fun, t0, y0, t_bound, first_step=None, min_step=0.0,
                 max_step=np.inf, rtol=1e-3, atol=1e-6, jac=None, lband=None,
                 uband=None, vectorized=False, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)

        if first_step is None:
            first_step = 0  # LSODA value for automatic selection.
        else:
            first_step = validate_first_step(first_step, t0, t_bound)

        # LSODA expects a signed step; sign encodes the integration direction.
        first_step *= self.direction

        if max_step == np.inf:
            max_step = 0  # LSODA value for infinity.
        elif max_step <= 0:
            raise ValueError("`max_step` must be positive.")

        if min_step < 0:
            raise ValueError("`min_step` must be nonnegative.")

        rtol, atol = validate_tol(rtol, atol, self.n)

        # Delegate to the legacy `ode` wrapper around the Fortran LSODA code.
        solver = ode(self.fun, jac)
        solver.set_integrator('lsoda', rtol=rtol, atol=atol, max_step=max_step,
                              min_step=min_step, first_step=first_step,
                              lband=lband, uband=uband)
        solver.set_initial_value(y0, t0)

        # Inject t_bound into rwork array as needed for itask=5.
        solver._integrator.rwork[0] = self.t_bound
        solver._integrator.call_args[4] = solver._integrator.rwork

        self._lsoda_solver = solver

    def _step_impl(self):
        """Advance one internal LSODA step, stopping at ``t_bound``.

        Returns ``(success, message)`` as required by `OdeSolver`.
        """
        solver = self._lsoda_solver
        integrator = solver._integrator

        # From lsoda.step and lsoda.integrate itask=5 means take a single
        # step and do not go past t_bound. The original itask is restored
        # afterwards so the wrapped solver's state is left untouched.
        itask = integrator.call_args[2]
        integrator.call_args[2] = 5
        solver._y, solver.t = integrator.run(
            solver.f, solver.jac or (lambda: None), solver._y, solver.t,
            self.t_bound, solver.f_params, solver.jac_params)
        integrator.call_args[2] = itask

        if solver.successful():
            self.t = solver.t
            self.y = solver._y
            # From LSODA Fortran source njev is equal to nlu.
            self.njev = integrator.iwork[12]
            self.nlu = integrator.iwork[12]
            return True, None
        else:
            return False, 'Unexpected istate in LSODA.'

    def _dense_output_impl(self):
        """Build a local interpolant from LSODA's Nordsieck history array."""
        iwork = self._lsoda_solver._integrator.iwork
        rwork = self._lsoda_solver._integrator.rwork

        # We want to produce the Nordsieck history array, yh, up to the order
        # used in the last successful iteration. The step size is unimportant
        # because it will be scaled out in LsodaDenseOutput. Some additional
        # work may be required because ODEPACK's LSODA implementation produces
        # the Nordsieck history in the state needed for the next iteration.

        # iwork[13] contains order from last successful iteration, while
        # iwork[14] contains order to be attempted next.
        order = iwork[13]

        # rwork[11] contains the step size to be attempted next, while
        # rwork[10] contains step size from last successful iteration.
        h = rwork[11]

        # rwork[20:20 + (iwork[14] + 1) * self.n] contains entries of the
        # Nordsieck array in state needed for next iteration. We want
        # the entries up to order for the last successful step so use the
        # following.
        yh = np.reshape(rwork[20:20 + (order + 1) * self.n],
                        (self.n, order + 1), order='F').copy()
        if iwork[14] < order:
            # If the order is set to decrease then the final column of yh
            # has not been updated within ODEPACK's LSODA
            # implementation because this column will not be used in the
            # next iteration. We must rescale this column to make the
            # associated step size consistent with the other columns.
            yh[:, -1] *= (h / rwork[10]) ** order

        return LsodaDenseOutput(self.t_old, self.t, h, order, yh)


class LsodaDenseOutput(DenseOutput):
    """Local interpolant over the last LSODA step.

    Evaluates the Taylor-like expansion stored in the Nordsieck history
    array ``yh`` (columns are scaled derivatives) around the step endpoint.
    """
    def __init__(self, t_old, t, h, order, yh):
        super().__init__(t_old, t)
        # h: step size the Nordsieck columns are scaled by.
        self.h = h
        # yh: Nordsieck array, shape (n, order + 1).
        self.yh = yh
        # Powers 0..order used to build the polynomial basis.
        self.p = np.arange(order + 1)

    def _call_impl(self, t):
        # Evaluate sum_k yh[:, k] * ((t - self.t) / h) ** k; broadcasting
        # differs for scalar vs. array `t`.
        if t.ndim == 0:
            x = ((t - self.t) / self.h) ** self.p
        else:
            x = ((t - self.t) / self.h) ** self.p[:, None]

        return np.dot(self.yh, x)
S6 = 6 ** 0.5

# Butcher tableau. A is not used directly, see below.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3

# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
              - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))

# These are transformation matrices.
T = np.array([
    [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
    [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
    [1, 1, 0]])
TI = np.array([
    [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
    [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
    [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]

# Interpolator coefficients.
P = np.array([
    [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
    [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
    [1/3, -8/3, 10/3]])


NEWTON_MAXITER = 6  # Maximum number of Newton iterations.
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.


def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
                             LU_real, LU_complex, solve_lu):
    """Solve the collocation system by simplified Newton iteration.

    The 3-stage Radau IIA collocation equations are solved in the
    eigen-transformed variables ``W = TI @ Z``, which decouples the linear
    part into one real and one complex system (factored as `LU_real` and
    `LU_complex`).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    h : float
        Step to try.
    Z0 : ndarray, shape (3, n)
        Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
    scale : ndarray, shape (n)
        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
    tol : float
        Tolerance to which solve the system. This value is compared with
        the normalized by `scale` error.
    LU_real, LU_complex
        LU decompositions of the system Jacobians.
    solve_lu : callable
        Callable which solves a linear system given a LU decomposition. The
        signature is ``solve_lu(LU, b)``.

    Returns
    -------
    converged : bool
        Whether iterations converged.
    n_iter : int
        Number of completed iterations.
    Z : ndarray, shape (3, n)
        Found solution.
    rate : float
        The rate of convergence.
    """
    n = y.shape[0]
    M_real = MU_REAL / h
    M_complex = MU_COMPLEX / h

    W = TI.dot(Z0)
    Z = Z0

    F = np.empty((3, n))
    ch = h * C  # Collocation abscissae offsets within the step.

    dW_norm_old = None
    dW = np.empty_like(W)
    converged = False
    rate = None
    for k in range(NEWTON_MAXITER):
        # Evaluate the RHS at the three collocation points.
        for i in range(3):
            F[i] = fun(t + ch[i], y + Z[i])

        if not np.all(np.isfinite(F)):
            break

        # Residuals of the decoupled real and complex subsystems.
        f_real = F.T.dot(TI_REAL) - M_real * W[0]
        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])

        dW_real = solve_lu(LU_real, f_real)
        dW_complex = solve_lu(LU_complex, f_complex)

        dW[0] = dW_real
        dW[1] = dW_complex.real
        dW[2] = dW_complex.imag

        dW_norm = norm(dW / scale)
        if dW_norm_old is not None:
            rate = dW_norm / dW_norm_old

        # Give up if diverging, or if the geometric-series estimate of the
        # error after the remaining iterations cannot reach `tol`.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
            break

        W += dW
        Z = T.dot(W)

        # Converged when the estimated remaining error is below `tol`.
        if (dW_norm == 0 or
                rate is not None and rate / (1 - rate) * dW_norm < tol):
            converged = True
            break

        dW_norm_old = dW_norm

    return converged, k + 1, Z, rate
def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
    """Predict the factor by which to scale the next step size.

    Implements the step-size controller from [1]_: when an accepted previous
    step is available, a predictive (two-step) controller is used, otherwise
    a standard one-step controller based only on the current error norm.

    Parameters
    ----------
    h_abs, h_abs_old : float
        Current and previous values of the step size, `h_abs_old` can be
        None when no previous step information exists.
    error_norm, error_norm_old : float
        Current and previous values of the error norm, `error_norm_old`
        can be None.

    Returns
    -------
    factor : float
        Predicted factor.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
    """
    # Fall back to the one-step controller when history is missing or the
    # current error vanishes (the history ratio would be undefined).
    no_history = h_abs_old is None or error_norm_old is None
    if no_history or error_norm == 0:
        multiplier = 1
    else:
        multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25

    # error_norm == 0 yields an infinite factor; silence the divide warning.
    with np.errstate(divide='ignore'):
        return min(1, multiplier) * error_norm ** -0.25
Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to + y, required by this method. The Jacobian matrix has shape (n, n) and + its element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [2]_.
A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by this method, but may result in slower + execution overall in some circumstances (e.g. small ``len(y0)``). + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II: + Stiff and Differential-Algebraic Problems", Sec. IV.8. + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. 
+ """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + # Select initial step assuming the same order which is used to control + # the error. + if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction, + 3, self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + self.sol = None + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc') + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + self.current_jac = True + self.LU_real = None + self.LU_complex = None + self.Z = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y, f): + self.njev += 1 + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0, self.f) + elif callable(jac): + J = jac(t0, y0) + self.njev = 1 + if issparse(J): + J = csc_matrix(J) + + def jac_wrapped(t, y, _=None): + 
self.njev += 1 + return csc_matrix(jac(t, y), dtype=float) + + else: + J = np.asarray(J, dtype=float) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return np.asarray(jac(t, y), dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError(f"`jac` is expected to have shape {(self.n, self.n)}," + f" but actually has {J.shape}.") + else: + if issparse(jac): + J = csc_matrix(jac) + else: + J = np.asarray(jac, dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError(f"`jac` is expected to have shape {(self.n, self.n)}," + f" but actually has {J.shape}.") + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + y = self.y + f = self.f + + max_step = self.max_step + atol = self.atol + rtol = self.rtol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + h_abs_old = None + error_norm_old = None + elif self.h_abs < min_step: + h_abs = min_step + h_abs_old = None + error_norm_old = None + else: + h_abs = self.h_abs + h_abs_old = self.h_abs_old + error_norm_old = self.error_norm_old + + J = self.J + LU_real = self.LU_real + LU_complex = self.LU_complex + + current_jac = self.current_jac + jac = self.jac + + rejected = False + step_accepted = False + message = None + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + if self.sol is None: + Z0 = np.zeros((3, y.shape[0])) + else: + Z0 = self.sol(t + h * C).T - y + + scale = atol + np.abs(y) * rtol + + converged = False + while not converged: + if LU_real is None or LU_complex is None: + LU_real = self.lu(MU_REAL / h * self.I - J) + LU_complex = self.lu(MU_COMPLEX / h * self.I - J) + + converged, n_iter, Z, rate = solve_collocation_system( + self.fun, t, y, h, Z0, scale, self.newton_tol, + LU_real, LU_complex, 
self.solve_lu) + + if not converged: + if current_jac: + break + + J = self.jac(t, y, f) + current_jac = True + LU_real = None + LU_complex = None + + if not converged: + h_abs *= 0.5 + LU_real = None + LU_complex = None + continue + + y_new = y + Z[-1] + ZE = Z.T.dot(E) / h + error = self.solve_lu(LU_real, f + ZE) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = norm(error / scale) + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + if rejected and error_norm > 1: + error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE) + error_norm = norm(error / scale) + + if error_norm > 1: + factor = predict_factor(h_abs, h_abs_old, + error_norm, error_norm_old) + h_abs *= max(MIN_FACTOR, safety * factor) + + LU_real = None + LU_complex = None + rejected = True + else: + step_accepted = True + + recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3 + + factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old) + factor = min(MAX_FACTOR, safety * factor) + + if not recompute_jac and factor < 1.2: + factor = 1 + else: + LU_real = None + LU_complex = None + + f_new = self.fun(t_new, y_new) + if recompute_jac: + J = jac(t_new, y_new, f_new) + current_jac = True + elif jac is not None: + current_jac = False + + self.h_abs_old = self.h_abs + self.error_norm_old = error_norm + + self.h_abs = h_abs * factor + + self.y_old = y + + self.t = t_new + self.y = y_new + self.f = f_new + + self.Z = Z + + self.LU_real = LU_real + self.LU_complex = LU_complex + self.current_jac = current_jac + self.J = J + + self.t_old = t + self.sol = self._compute_dense_output() + + return step_accepted, message + + def _compute_dense_output(self): + Q = np.dot(self.Z.T, P) + return RadauDenseOutput(self.t_old, self.t, self.y_old, Q) + + def _dense_output_impl(self): + return self.sol + + +class RadauDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super().__init__(t_old, t) + self.h = t - t_old + self.Q = Q + 
self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + if t.ndim == 0: + p = np.tile(x, self.order + 1) + p = np.cumprod(p) + else: + p = np.tile(x, (self.order + 1, 1)) + p = np.cumprod(p, axis=0) + # Here we don't multiply by h, not a mistake. + y = np.dot(self.Q, p) + if y.ndim == 2: + y += self.y_old[:, None] + else: + y += self.y_old + + return y diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py new file mode 100644 index 0000000000000000000000000000000000000000..62a5347ffe91afc754e9b818d0b34c010d0c4d12 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/rk.py @@ -0,0 +1,601 @@ +import numpy as np +from .base import OdeSolver, DenseOutput +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, warn_extraneous, validate_first_step) +from . import dop853_coefficients + +# Multiply steps computed from asymptotic behaviour of errors by this. +SAFETY = 0.9 + +MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size. +MAX_FACTOR = 10 # Maximum allowed increase in a step size. + + +def rk_step(fun, t, y, f, h, A, B, C, K): + """Perform a single Runge-Kutta step. + + This function computes a prediction of an explicit Runge-Kutta method and + also estimates the error of a less accurate method. + + Notation for Butcher tableau is as in [1]_. + + Parameters + ---------- + fun : callable + Right-hand side of the system. + t : float + Current time. + y : ndarray, shape (n,) + Current state. + f : ndarray, shape (n,) + Current value of the derivative, i.e., ``fun(x, y)``. + h : float + Step to use. + A : ndarray, shape (n_stages, n_stages) + Coefficients for combining previous RK stages to compute the next + stage. For explicit methods the coefficients at and above the main + diagonal are zeros. 
+ B : ndarray, shape (n_stages,) + Coefficients for combining RK stages for computing the final + prediction. + C : ndarray, shape (n_stages,) + Coefficients for incrementing time for consecutive RK stages. + The value for the first stage is always zero. + K : ndarray, shape (n_stages + 1, n) + Storage array for putting RK stages here. Stages are stored in rows. + The last row is a linear combination of the previous rows with + coefficients + + Returns + ------- + y_new : ndarray, shape (n,) + Solution at t + h computed with a higher accuracy. + f_new : ndarray, shape (n,) + Derivative ``fun(t + h, y_new)``. + + References + ---------- + .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations I: Nonstiff Problems", Sec. II.4. + """ + K[0] = f + for s, (a, c) in enumerate(zip(A[1:], C[1:]), start=1): + dy = np.dot(K[:s].T, a[:s]) * h + K[s] = fun(t + c * h, y + dy) + + y_new = y + h * np.dot(K[:-1].T, B) + f_new = fun(t + h, y_new) + + K[-1] = f_new + + return y_new, f_new + + +class RungeKutta(OdeSolver): + """Base class for explicit Runge-Kutta methods.""" + C: np.ndarray = NotImplemented + A: np.ndarray = NotImplemented + B: np.ndarray = NotImplemented + E: np.ndarray = NotImplemented + P: np.ndarray = NotImplemented + order: int = NotImplemented + error_estimator_order: int = NotImplemented + n_stages: int = NotImplemented + + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, vectorized=False, + first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction, + self.error_estimator_order, self.rtol, self.atol) + else: + self.h_abs = 
validate_first_step(first_step, t0, t_bound) + self.K = np.empty((self.n_stages + 1, self.n), dtype=self.y.dtype) + self.error_exponent = -1 / (self.error_estimator_order + 1) + self.h_previous = None + + def _estimate_error(self, K, h): + return np.dot(K.T, self.E) * h + + def _estimate_error_norm(self, K, h, scale): + return norm(self._estimate_error(K, h) / scale) + + def _step_impl(self): + t = self.t + y = self.y + + max_step = self.max_step + rtol = self.rtol + atol = self.atol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + + if self.h_abs > max_step: + h_abs = max_step + elif self.h_abs < min_step: + h_abs = min_step + else: + h_abs = self.h_abs + + step_accepted = False + step_rejected = False + + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + y_new, f_new = rk_step(self.fun, t, y, self.f, h, self.A, + self.B, self.C, self.K) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = self._estimate_error_norm(self.K, h, scale) + + if error_norm < 1: + if error_norm == 0: + factor = MAX_FACTOR + else: + factor = min(MAX_FACTOR, + SAFETY * error_norm ** self.error_exponent) + + if step_rejected: + factor = min(1, factor) + + h_abs *= factor + + step_accepted = True + else: + h_abs *= max(MIN_FACTOR, + SAFETY * error_norm ** self.error_exponent) + step_rejected = True + + self.h_previous = h + self.y_old = y + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.f = f_new + + return True, None + + def _dense_output_impl(self): + Q = self.K.T.dot(self.P) + return RkDenseOutput(self.t_old, self.t, self.y_old, Q) + + +class RK23(RungeKutta): + """Explicit Runge-Kutta method of order 3(2). + + This uses the Bogacki-Shampine pair of formulas [1]_. 
The error is controlled + assuming accuracy of the second-order method, but steps are taken using the + third-order accurate formula (local extrapolation is done). A cubic Hermite + polynomial is used for the dense output. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. 
Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + vectorized : bool, optional + Whether `fun` may be called in a vectorized fashion. False (default) + is recommended for this solver. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for this solver. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number evaluations of the system's right-hand side. + njev : int + Number of evaluations of the Jacobian. + Is always 0 for this solver as it does not use the Jacobian. + nlu : int + Number of LU decompositions. Is always 0 for this solver. + + References + ---------- + .. [1] P. Bogacki, L.F. Shampine, "A 3(2) Pair of Runge-Kutta Formulas", + Appl. Math. Lett. Vol. 2, No. 4. pp. 321-325, 1989. 
+ """ + order = 3 + error_estimator_order = 2 + n_stages = 3 + C = np.array([0, 1/2, 3/4]) + A = np.array([ + [0, 0, 0], + [1/2, 0, 0], + [0, 3/4, 0] + ]) + B = np.array([2/9, 1/3, 4/9]) + E = np.array([5/72, -1/12, -1/9, 1/8]) + P = np.array([[1, -4 / 3, 5 / 9], + [0, 1, -2/3], + [0, 4/3, -8/9], + [0, -1, 1]]) + + +class RK45(RungeKutta): + """Explicit Runge-Kutta method of order 5(4). + + This uses the Dormand-Prince pair of formulas [1]_. The error is controlled + assuming accuracy of the fourth-order method accuracy, but steps are taken + using the fifth-order accurate formula (local extrapolation is done). + A quartic interpolation polynomial is used for the dense output [2]_. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here ``t`` is a scalar, and there are two options for the ndarray ``y``: + It can either have shape (n,); then ``fun`` must return array_like with + shape (n,). Alternatively it can have shape (n, k); then ``fun`` + must return an array_like with shape (n, k), i.e., each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. 
Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number evaluations of the system's right-hand side. + njev : int + Number of evaluations of the Jacobian. + Is always 0 for this solver as it does not use the Jacobian. + nlu : int + Number of LU decompositions. Is always 0 for this solver. + + References + ---------- + .. [1] J. R. Dormand, P. J. Prince, "A family of embedded Runge-Kutta + formulae", Journal of Computational and Applied Mathematics, Vol. 6, + No. 1, pp. 19-26, 1980. + .. [2] L. W. Shampine, "Some Practical Runge-Kutta Formulas", Mathematics + of Computation,, Vol. 46, No. 173, pp. 135-150, 1986. 
+ """ + order = 5 + error_estimator_order = 4 + n_stages = 6 + C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1]) + A = np.array([ + [0, 0, 0, 0, 0], + [1/5, 0, 0, 0, 0], + [3/40, 9/40, 0, 0, 0], + [44/45, -56/15, 32/9, 0, 0], + [19372/6561, -25360/2187, 64448/6561, -212/729, 0], + [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656] + ]) + B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]) + E = np.array([-71/57600, 0, 71/16695, -71/1920, 17253/339200, -22/525, + 1/40]) + # Corresponds to the optimum value of c_6 from [2]_. + P = np.array([ + [1, -8048581381/2820520608, 8663915743/2820520608, + -12715105075/11282082432], + [0, 0, 0, 0], + [0, 131558114200/32700410799, -68118460800/10900136933, + 87487479700/32700410799], + [0, -1754552775/470086768, 14199869525/1410260304, + -10690763975/1880347072], + [0, 127303824393/49829197408, -318862633887/49829197408, + 701980252875 / 199316789632], + [0, -282668133/205662961, 2019193451/616988883, -1453857185/822651844], + [0, 40617522/29380423, -110615467/29380423, 69997945/29380423]]) + + +class DOP853(RungeKutta): + """Explicit Runge-Kutta method of order 8. + + This is a Python implementation of "DOP853" algorithm originally written + in Fortran [1]_, [2]_. Note that this is not a literal translation, but + the algorithmic core and coefficients are the same. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system. The calling signature is ``fun(t, y)``. + Here, ``t`` is a scalar, and there are two options for the ndarray ``y``: + It can either have shape (n,); then ``fun`` must return array_like with + shape (n,). Alternatively it can have shape (n, k); then ``fun`` + must return an array_like with shape (n, k), i.e. each column + corresponds to a single column in ``y``. The choice between the two + options is determined by `vectorized` argument (see below). + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. 
+ t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e. the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + vectorized : bool, optional + Whether `fun` is implemented in a vectorized fashion. Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number evaluations of the system's right-hand side. 
+ njev : int + Number of evaluations of the Jacobian. Is always 0 for this solver + as it does not use the Jacobian. + nlu : int + Number of LU decompositions. Is always 0 for this solver. + + References + ---------- + .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations I: Nonstiff Problems", Sec. II. + .. [2] `Page with original Fortran code of DOP853 + <http://www.unige.ch/~hairer/software.html>`_. + """ + n_stages = dop853_coefficients.N_STAGES + order = 8 + error_estimator_order = 7 + A = dop853_coefficients.A[:n_stages, :n_stages] + B = dop853_coefficients.B + C = dop853_coefficients.C[:n_stages] + E3 = dop853_coefficients.E3 + E5 = dop853_coefficients.E5 + D = dop853_coefficients.D + + A_EXTRA = dop853_coefficients.A[n_stages + 1:] + C_EXTRA = dop853_coefficients.C[n_stages + 1:] + + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, vectorized=False, + first_step=None, **extraneous): + super().__init__(fun, t0, y0, t_bound, max_step, rtol, atol, + vectorized, first_step, **extraneous) + self.K_extended = np.empty((dop853_coefficients.N_STAGES_EXTENDED, + self.n), dtype=self.y.dtype) + self.K = self.K_extended[:self.n_stages + 1] + + def _estimate_error(self, K, h): # Left for testing purposes. 
+ err5 = np.dot(K.T, self.E5) + err3 = np.dot(K.T, self.E3) + denom = np.hypot(np.abs(err5), 0.1 * np.abs(err3)) + correction_factor = np.ones_like(err5) + mask = denom > 0 + correction_factor[mask] = np.abs(err5[mask]) / denom[mask] + return h * err5 * correction_factor + + def _estimate_error_norm(self, K, h, scale): + err5 = np.dot(K.T, self.E5) / scale + err3 = np.dot(K.T, self.E3) / scale + err5_norm_2 = np.linalg.norm(err5)**2 + err3_norm_2 = np.linalg.norm(err3)**2 + if err5_norm_2 == 0 and err3_norm_2 == 0: + return 0.0 + denom = err5_norm_2 + 0.01 * err3_norm_2 + return np.abs(h) * err5_norm_2 / np.sqrt(denom * len(scale)) + + def _dense_output_impl(self): + K = self.K_extended + h = self.h_previous + for s, (a, c) in enumerate(zip(self.A_EXTRA, self.C_EXTRA), + start=self.n_stages + 1): + dy = np.dot(K[:s].T, a[:s]) * h + K[s] = self.fun(self.t_old + c * h, self.y_old + dy) + + F = np.empty((dop853_coefficients.INTERPOLATOR_POWER, self.n), + dtype=self.y_old.dtype) + + f_old = K[0] + delta_y = self.y - self.y_old + + F[0] = delta_y + F[1] = h * f_old - delta_y + F[2] = 2 * delta_y - h * (self.f + f_old) + F[3:] = h * np.dot(self.D, K) + + return Dop853DenseOutput(self.t_old, self.t, self.y_old, F) + + +class RkDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super().__init__(t_old, t) + self.h = t - t_old + self.Q = Q + self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + if t.ndim == 0: + p = np.tile(x, self.order + 1) + p = np.cumprod(p) + else: + p = np.tile(x, (self.order + 1, 1)) + p = np.cumprod(p, axis=0) + y = self.h * np.dot(self.Q, p) + if y.ndim == 2: + y += self.y_old[:, None] + else: + y += self.y_old + + return y + + +class Dop853DenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, F): + super().__init__(t_old, t) + self.h = t - t_old + self.F = F + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + + if t.ndim == 0: + y 
= np.zeros_like(self.y_old) + else: + x = x[:, None] + y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype) + + for i, f in enumerate(reversed(self.F)): + y += f + if i % 2 == 0: + y *= x + else: + y *= 1 - x + y += self.y_old + + return y.T diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8232e072ac248a0c9846da911d2867d7b047e2d9 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28d6b513a3c53fdc4abc24b73720a3ee134b4817 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_ivp.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86688ecd4bdf5c7d455a8d7efb1c333bc79a3bbf Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/__pycache__/test_rk.cpython-310.pyc differ diff --git 
a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa989370d4e46d9ebd91d65e95ea87153b00b54 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_ivp.py @@ -0,0 +1,1268 @@ +from itertools import product +from numpy.testing import (assert_, assert_allclose, assert_array_less, + assert_equal, assert_no_warnings, suppress_warnings) +import pytest +from pytest import raises as assert_raises +import numpy as np +from scipy.optimize._numdiff import group_columns +from scipy.integrate import solve_ivp, RK23, RK45, DOP853, Radau, BDF, LSODA +from scipy.integrate import OdeSolution +from scipy.integrate._ivp.common import num_jac, select_initial_step +from scipy.integrate._ivp.base import ConstantDenseOutput +from scipy.sparse import coo_matrix, csc_matrix + + +def fun_zero(t, y): + return np.zeros_like(y) + + +def fun_linear(t, y): + return np.array([-y[0] - 5 * y[1], y[0] + y[1]]) + + +def jac_linear(): + return np.array([[-1, -5], [1, 1]]) + + +def sol_linear(t): + return np.vstack((-5 * np.sin(2 * t), + 2 * np.cos(2 * t) + np.sin(2 * t))) + + +def fun_rational(t, y): + return np.array([y[1] / t, + y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1))]) + + +def fun_rational_vectorized(t, y): + return np.vstack((y[1] / t, + y[1] * (y[0] + 2 * y[1] - 1) / (t * (y[0] - 1)))) + + +def jac_rational(t, y): + return np.array([ + [0, 1 / t], + [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), + (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] + ]) + + +def jac_rational_sparse(t, y): + return csc_matrix([ + [0, 1 / t], + [-2 * y[1] ** 2 / (t * (y[0] - 1) ** 2), + (y[0] + 4 * y[1] - 1) / (t * (y[0] - 1))] + ]) + + +def sol_rational(t): + return np.asarray((t / (t + 10), 10 * t / (t + 10) ** 2)) + + +def fun_medazko(t, y): + n = y.shape[0] // 2 + k = 100 + c = 4 + + phi = 2 if t 
<= 5 else 0 + y = np.hstack((phi, 0, y, y[-2])) + + d = 1 / n + j = np.arange(n) + 1 + alpha = 2 * (j * d - 1) ** 3 / c ** 2 + beta = (j * d - 1) ** 4 / c ** 2 + + j_2_p1 = 2 * j + 2 + j_2_m3 = 2 * j - 2 + j_2_m1 = 2 * j + j_2 = 2 * j + 1 + + f = np.empty(2 * n) + f[::2] = (alpha * (y[j_2_p1] - y[j_2_m3]) / (2 * d) + + beta * (y[j_2_m3] - 2 * y[j_2_m1] + y[j_2_p1]) / d ** 2 - + k * y[j_2_m1] * y[j_2]) + f[1::2] = -k * y[j_2] * y[j_2_m1] + + return f + + +def medazko_sparsity(n): + cols = [] + rows = [] + + i = np.arange(n) * 2 + + cols.append(i[1:]) + rows.append(i[1:] - 2) + + cols.append(i) + rows.append(i) + + cols.append(i) + rows.append(i + 1) + + cols.append(i[:-1]) + rows.append(i[:-1] + 2) + + i = np.arange(n) * 2 + 1 + + cols.append(i) + rows.append(i) + + cols.append(i) + rows.append(i - 1) + + cols = np.hstack(cols) + rows = np.hstack(rows) + + return coo_matrix((np.ones_like(cols), (cols, rows))) + + +def fun_complex(t, y): + return -y + + +def jac_complex(t, y): + return -np.eye(y.shape[0]) + + +def jac_complex_sparse(t, y): + return csc_matrix(jac_complex(t, y)) + + +def sol_complex(t): + y = (0.5 + 1j) * np.exp(-t) + return y.reshape((1, -1)) + + +def fun_event_dense_output_LSODA(t, y): + return y * (t - 2) + + +def jac_event_dense_output_LSODA(t, y): + return t - 2 + + +def sol_event_dense_output_LSODA(t): + return np.exp(t ** 2 / 2 - 2 * t + np.log(0.05) - 6) + + +def compute_error(y, y_true, rtol, atol): + e = (y - y_true) / (atol + rtol * np.abs(y_true)) + return np.linalg.norm(e, axis=0) / np.sqrt(e.shape[0]) + + +@pytest.mark.thread_unsafe +def test_integration(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + + for vectorized, method, t_span, jac in product( + [False, True], + ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA'], + [[5, 9], [5, 1]], + [None, jac_rational, jac_rational_sparse]): + + if vectorized: + fun = fun_rational_vectorized + else: + fun = fun_rational + + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The 
following arguments have no effect for a chosen " + "solver: `jac`") + res = solve_ivp(fun, t_span, y0, rtol=rtol, + atol=atol, method=method, dense_output=True, + jac=jac, vectorized=vectorized) + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + if method == 'DOP853': + # DOP853 spends more functions evaluation because it doesn't + # have enough time to develop big enough step size. + assert_(res.nfev < 50) + else: + assert_(res.nfev < 40) + + if method in ['RK23', 'RK45', 'DOP853', 'LSODA']: + assert_equal(res.njev, 0) + assert_equal(res.nlu, 0) + else: + assert_(0 < res.njev < 3) + assert_(0 < res.nlu < 10) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = (t_span[0] + t_span[-1]) / 2 + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + +@pytest.mark.thread_unsafe +def test_integration_complex(): + rtol = 1e-3 + atol = 1e-6 + y0 = [0.5 + 1j] + t_span = [0, 1] + tc = np.linspace(t_span[0], t_span[1]) + for method, jac in product(['RK23', 'RK45', 'DOP853', 'BDF'], + [None, jac_complex, jac_complex_sparse]): + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The following arguments have no effect for a chosen " + "solver: `jac`") + res = solve_ivp(fun_complex, t_span, y0, method=method, + dense_output=True, rtol=rtol, atol=atol, jac=jac) + + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + if method == 'DOP853': + assert res.nfev < 35 + else: + assert res.nfev < 25 + + if method == 'BDF': + 
assert_equal(res.njev, 1) + assert res.nlu < 6 + else: + assert res.njev == 0 + assert res.nlu == 0 + + y_true = sol_complex(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert np.all(e < 5) + + yc_true = sol_complex(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, rtol, atol) + + assert np.all(e < 5) + + +@pytest.mark.fail_slow(5) +def test_integration_sparse_difference(): + n = 200 + t_span = [0, 20] + y0 = np.zeros(2 * n) + y0[1::2] = 1 + sparsity = medazko_sparsity(n) + + for method in ['BDF', 'Radau']: + res = solve_ivp(fun_medazko, t_span, y0, method=method, + jac_sparsity=sparsity) + + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_allclose(res.y[78, -1], 0.233994e-3, rtol=1e-2) + assert_allclose(res.y[79, -1], 0, atol=1e-3) + assert_allclose(res.y[148, -1], 0.359561e-3, rtol=1e-2) + assert_allclose(res.y[149, -1], 0, atol=1e-3) + assert_allclose(res.y[198, -1], 0.117374129e-3, rtol=1e-2) + assert_allclose(res.y[199, -1], 0.6190807e-5, atol=1e-3) + assert_allclose(res.y[238, -1], 0, atol=1e-3) + assert_allclose(res.y[239, -1], 0.9999997, rtol=1e-2) + + +def test_integration_const_jac(): + rtol = 1e-3 + atol = 1e-6 + y0 = [0, 2] + t_span = [0, 2] + J = jac_linear() + J_sparse = csc_matrix(J) + + for method, jac in product(['Radau', 'BDF'], [J, J_sparse]): + res = solve_ivp(fun_linear, t_span, y0, rtol=rtol, atol=atol, + method=method, dense_output=True, jac=jac) + assert_equal(res.t[0], t_span[0]) + assert_(res.t_events is None) + assert_(res.y_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_(res.nfev < 100) + assert_equal(res.njev, 0) + assert_(0 < res.nlu < 15) + + y_true = sol_linear(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 10)) + + tc = np.linspace(*t_span) + yc_true = sol_linear(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + 
assert_(np.all(e < 15)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-14, atol=1e-14) + + +@pytest.mark.slow +@pytest.mark.parametrize('method', ['Radau', 'BDF', 'LSODA']) +def test_integration_stiff(method, num_parallel_threads): + rtol = 1e-6 + atol = 1e-6 + y0 = [1e4, 0, 0] + tspan = [0, 1e8] + + if method == 'LSODA' and num_parallel_threads > 1: + pytest.skip(reason='LSODA does not allow for concurrent calls') + + def fun_robertson(t, state): + x, y, z = state + return [ + -0.04 * x + 1e4 * y * z, + 0.04 * x - 1e4 * y * z - 3e7 * y * y, + 3e7 * y * y, + ] + + res = solve_ivp(fun_robertson, tspan, y0, rtol=rtol, + atol=atol, method=method) + + # If the stiff mode is not activated correctly, these numbers will be much bigger + assert res.nfev < 5000 + assert res.njev < 200 + + +def test_events(num_parallel_threads): + def event_rational_1(t, y): + return y[0] - y[1] ** 0.7 + + def event_rational_2(t, y): + return y[1] ** 0.6 - y[0] + + def event_rational_3(t, y): + return t - 7.4 + + event_rational_3.terminal = True + + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + if method == 'LSODA' and num_parallel_threads > 1: + continue + + res = solve_ivp(fun_rational, [5, 8], [1/3, 2/9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[1][0] < 7.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = 1 + event_rational_2.direction = 1 + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 
1) + assert_equal(res.t_events[1].size, 0) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (0,)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + + event_rational_1.direction = -1 + event_rational_2.direction = -1 + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + assert_equal(res.y_events[0].shape, (0,)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = 0 + event_rational_2.direction = 0 + + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=(event_rational_1, event_rational_2, + event_rational_3), dense_output=True) + assert_equal(res.status, 1) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_equal(res.t_events[2].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[2][0] < 7.5) + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (0,)) + assert_equal(res.y_events[2].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + assert np.isclose( + event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0) + + res = solve_ivp(fun_rational, [5, 8], [1 / 3, 2 / 9], method=method, + events=event_rational_1, dense_output=True) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + + # Also test that termination by event doesn't break interpolants. 
+ tc = np.linspace(res.t[0], res.t[-1]) + yc_true = sol_rational(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, 1e-3, 1e-6) + assert_(np.all(e < 5)) + + # Test that the y_event matches solution + assert np.allclose(sol_rational(res.t_events[0][0]), res.y_events[0][0], + rtol=1e-3, atol=1e-6) + + # Test in backward direction. + event_rational_1.direction = 0 + event_rational_2.direction = 0 + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + if method == 'LSODA' and num_parallel_threads > 1: + continue + + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 1) + assert_(5.3 < res.t_events[0][0] < 5.7) + assert_(7.3 < res.t_events[1][0] < 7.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = -1 + event_rational_2.direction = -1 + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 1) + assert_equal(res.t_events[1].size, 0) + assert_(5.3 < res.t_events[0][0] < 5.7) + + assert_equal(res.y_events[0].shape, (1, 2)) + assert_equal(res.y_events[1].shape, (0,)) + assert np.isclose( + event_rational_1(res.t_events[0][0], res.y_events[0][0]), 0) + + event_rational_1.direction = 1 + event_rational_2.direction = 1 + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2)) + assert_equal(res.status, 0) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + + assert_equal(res.y_events[0].shape, 
(0,)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + + event_rational_1.direction = 0 + event_rational_2.direction = 0 + + res = solve_ivp(fun_rational, [8, 5], [4/9, 20/81], method=method, + events=(event_rational_1, event_rational_2, + event_rational_3), dense_output=True) + assert_equal(res.status, 1) + assert_equal(res.t_events[0].size, 0) + assert_equal(res.t_events[1].size, 1) + assert_equal(res.t_events[2].size, 1) + assert_(7.3 < res.t_events[1][0] < 7.7) + assert_(7.3 < res.t_events[2][0] < 7.5) + + assert_equal(res.y_events[0].shape, (0,)) + assert_equal(res.y_events[1].shape, (1, 2)) + assert_equal(res.y_events[2].shape, (1, 2)) + assert np.isclose( + event_rational_2(res.t_events[1][0], res.y_events[1][0]), 0) + assert np.isclose( + event_rational_3(res.t_events[2][0], res.y_events[2][0]), 0) + + # Also test that termination by event doesn't break interpolants. + tc = np.linspace(res.t[-1], res.t[0]) + yc_true = sol_rational(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, 1e-3, 1e-6) + assert_(np.all(e < 5)) + + assert np.allclose(sol_rational(res.t_events[1][0]), res.y_events[1][0], + rtol=1e-3, atol=1e-6) + assert np.allclose(sol_rational(res.t_events[2][0]), res.y_events[2][0], + rtol=1e-3, atol=1e-6) + + +def _get_harmonic_oscillator(): + def f(t, y): + return [y[1], -y[0]] + + def event(t, y): + return y[0] + + return f, event + + +@pytest.mark.parametrize('n_events', [3, 4]) +def test_event_terminal_integer(n_events): + f, event = _get_harmonic_oscillator() + event.terminal = n_events + res = solve_ivp(f, (0, 100), [1, 0], events=event) + assert len(res.t_events[0]) == n_events + assert len(res.y_events[0]) == n_events + assert_allclose(res.y_events[0][:, 0], 0, atol=1e-14) + + +def test_event_terminal_iv(): + f, event = _get_harmonic_oscillator() + args = (f, (0, 100), [1, 0]) + + event.terminal = None + res = solve_ivp(*args, events=event) + 
event.terminal = 0 + ref = solve_ivp(*args, events=event) + assert_allclose(res.t_events, ref.t_events) + + message = "The `terminal` attribute..." + event.terminal = -1 + with pytest.raises(ValueError, match=message): + solve_ivp(*args, events=event) + event.terminal = 3.5 + with pytest.raises(ValueError, match=message): + solve_ivp(*args, events=event) + + +def test_max_step(num_parallel_threads): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + if method is LSODA and num_parallel_threads > 1: + continue + for t_span in ([5, 9], [5, 1]): + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, + max_step=0.5, atol=atol, method=method, + dense_output=True) + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_(np.all(np.abs(np.diff(res.t)) <= 0.5 + 1e-15)) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], max_step=-1) + + if method is not LSODA: + solver = method(fun_rational, t_span[0], y0, t_span[1], + rtol=rtol, atol=atol, max_step=1e-20) + message = solver.step() + message = solver.step() # First step succeeds but second step fails. 
+ assert_equal(solver.status, 'failed') + assert_("step size is less" in message) + assert_raises(RuntimeError, solver.step) + + +def test_first_step(num_parallel_threads): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + first_step = 0.1 + for method in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + if method is LSODA and num_parallel_threads > 1: + continue + for t_span in ([5, 9], [5, 1]): + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, + max_step=0.5, atol=atol, method=method, + dense_output=True, first_step=first_step) + + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_allclose(first_step, np.abs(res.t[1] - 5)) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + tc = np.linspace(*t_span) + yc_true = sol_rational(tc) + yc = res.sol(tc) + + e = compute_error(yc, yc_true, rtol, atol) + assert_(np.all(e < 5)) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], first_step=-1) + assert_raises(ValueError, method, fun_rational, t_span[0], y0, + t_span[1], first_step=5) + + +def test_t_eval(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + for t_span in ([5, 9], [5, 1]): + t_eval = np.linspace(t_span[0], t_span[1], 10) + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [5, 5.01, 7, 8, 8.01, 9] + res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = 
compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [5, 4.99, 3, 1.5, 1.1, 1.01, 1] + res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + t_eval = [5.01, 7, 8, 8.01] + res = solve_ivp(fun_rational, [5, 9], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + t_eval = [4.99, 3, 1.5, 1.1, 1.01] + res = solve_ivp(fun_rational, [5, 1], y0, rtol=rtol, atol=atol, + t_eval=t_eval) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + t_eval = [4, 6] + assert_raises(ValueError, solve_ivp, fun_rational, [5, 9], y0, + rtol=rtol, atol=atol, t_eval=t_eval) + + +def test_t_eval_dense_output(): + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + t_span = [5, 9] + t_eval = np.linspace(t_span[0], t_span[1], 10) + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval) + res_d = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + t_eval=t_eval, dense_output=True) + assert_equal(res.t, t_eval) + assert_(res.t_events is None) + assert_(res.success) + assert_equal(res.status, 0) + + assert_equal(res.t, res_d.t) + assert_equal(res.y, res_d.y) + assert_(res_d.t_events is None) + assert_(res_d.success) + assert_equal(res_d.status, 0) + + # if t and y are equal only test values for one case + y_true = sol_rational(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_(np.all(e < 5)) + + +@pytest.mark.thread_unsafe +def test_t_eval_early_event(): + def early_event(t, y): + return t - 7 + + early_event.terminal = True + + rtol = 1e-3 + atol = 1e-6 + y0 = [1/3, 2/9] + t_span = [5, 9] + t_eval = 
np.linspace(7.5, 9, 16) + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + with suppress_warnings() as sup: + sup.filter(UserWarning, + "The following arguments have no effect for a chosen " + "solver: `jac`") + res = solve_ivp(fun_rational, t_span, y0, rtol=rtol, atol=atol, + method=method, t_eval=t_eval, events=early_event, + jac=jac_rational) + assert res.success + assert res.message == 'A termination event occurred.' + assert res.status == 1 + assert not res.t and not res.y + assert len(res.t_events) == 1 + assert res.t_events[0].size == 1 + assert res.t_events[0][0] == 7 + + +def test_event_dense_output_LSODA(num_parallel_threads): + if num_parallel_threads > 1: + pytest.skip('LSODA does not allow for concurrent execution') + + def event_lsoda(t, y): + return y[0] - 2.02e-5 + + rtol = 1e-3 + atol = 1e-6 + y0 = [0.05] + t_span = [-2, 2] + first_step = 1e-3 + res = solve_ivp( + fun_event_dense_output_LSODA, + t_span, + y0, + method="LSODA", + dense_output=True, + events=event_lsoda, + first_step=first_step, + max_step=1, + rtol=rtol, + atol=atol, + jac=jac_event_dense_output_LSODA, + ) + + assert_equal(res.t[0], t_span[0]) + assert_equal(res.t[-1], t_span[-1]) + assert_allclose(first_step, np.abs(res.t[1] - t_span[0])) + assert res.success + assert_equal(res.status, 0) + + y_true = sol_event_dense_output_LSODA(res.t) + e = compute_error(res.y, y_true, rtol, atol) + assert_array_less(e, 5) + + tc = np.linspace(*t_span) + yc_true = sol_event_dense_output_LSODA(tc) + yc = res.sol(tc) + e = compute_error(yc, yc_true, rtol, atol) + assert_array_less(e, 5) + + assert_allclose(res.sol(res.t), res.y, rtol=1e-15, atol=1e-15) + + +def test_no_integration(): + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + sol = solve_ivp(lambda t, y: -y, [4, 4], [2, 3], + method=method, dense_output=True) + assert_equal(sol.sol(4), [2, 3]) + assert_equal(sol.sol([4, 5, 6]), [[2, 2, 2], [3, 3, 3]]) + + +def test_no_integration_class(): + for method 
in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + solver = method(lambda t, y: -y, 0.0, [10.0, 0.0], 0.0) + solver.step() + assert_equal(solver.status, 'finished') + sol = solver.dense_output() + assert_equal(sol(0.0), [10.0, 0.0]) + assert_equal(sol([0, 1, 2]), [[10, 10, 10], [0, 0, 0]]) + + solver = method(lambda t, y: -y, 0.0, [], np.inf) + solver.step() + assert_equal(solver.status, 'finished') + sol = solver.dense_output() + assert_equal(sol(100.0), []) + assert_equal(sol([0, 1, 2]), np.empty((0, 3))) + + +def test_empty(): + def fun(t, y): + return np.zeros((0,)) + + y0 = np.zeros((0,)) + + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + sol = assert_no_warnings(solve_ivp, fun, [0, 10], y0, + method=method, dense_output=True) + assert_equal(sol.sol(10), np.zeros((0,))) + assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) + + for method in ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']: + sol = assert_no_warnings(solve_ivp, fun, [0, np.inf], y0, + method=method, dense_output=True) + assert_equal(sol.sol(10), np.zeros((0,))) + assert_equal(sol.sol([1, 2, 3]), np.zeros((0, 3))) + + +def test_ConstantDenseOutput(): + sol = ConstantDenseOutput(0, 1, np.array([1, 2])) + assert_allclose(sol(1.5), [1, 2]) + assert_allclose(sol([1, 1.5, 2]), [[1, 1, 1], [2, 2, 2]]) + + sol = ConstantDenseOutput(0, 1, np.array([])) + assert_allclose(sol(1.5), np.empty(0)) + assert_allclose(sol([1, 1.5, 2]), np.empty((0, 3))) + + +def test_classes(): + y0 = [1 / 3, 2 / 9] + for cls in [RK23, RK45, DOP853, Radau, BDF, LSODA]: + solver = cls(fun_rational, 5, y0, np.inf) + assert_equal(solver.n, 2) + assert_equal(solver.status, 'running') + assert_equal(solver.t_bound, np.inf) + assert_equal(solver.direction, 1) + assert_equal(solver.t, 5) + assert_equal(solver.y, y0) + assert_(solver.step_size is None) + if cls is not LSODA: + assert_(solver.nfev > 0) + assert_(solver.njev >= 0) + assert_equal(solver.nlu, 0) + else: + assert_equal(solver.nfev, 0) + 
assert_equal(solver.njev, 0) + assert_equal(solver.nlu, 0) + + assert_raises(RuntimeError, solver.dense_output) + + message = solver.step() + assert_equal(solver.status, 'running') + assert_equal(message, None) + assert_equal(solver.n, 2) + assert_equal(solver.t_bound, np.inf) + assert_equal(solver.direction, 1) + assert_(solver.t > 5) + assert_(not np.all(np.equal(solver.y, y0))) + assert_(solver.step_size > 0) + assert_(solver.nfev > 0) + assert_(solver.njev >= 0) + assert_(solver.nlu >= 0) + sol = solver.dense_output() + assert_allclose(sol(5), y0, rtol=1e-15, atol=0) + + +def test_OdeSolution(): + ts = np.array([0, 2, 5], dtype=float) + s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) + s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) + + sol = OdeSolution(ts, [s1, s2]) + + assert_equal(sol(-1), [-1]) + assert_equal(sol(1), [-1]) + assert_equal(sol(2), [-1]) + assert_equal(sol(3), [1]) + assert_equal(sol(5), [1]) + assert_equal(sol(6), [1]) + + assert_equal(sol([0, 6, -2, 1.5, 4.5, 2.5, 5, 5.5, 2]), + np.array([[-1, 1, -1, -1, 1, 1, 1, 1, -1]])) + + ts = np.array([10, 4, -3]) + s1 = ConstantDenseOutput(ts[0], ts[1], np.array([-1])) + s2 = ConstantDenseOutput(ts[1], ts[2], np.array([1])) + + sol = OdeSolution(ts, [s1, s2]) + assert_equal(sol(11), [-1]) + assert_equal(sol(10), [-1]) + assert_equal(sol(5), [-1]) + assert_equal(sol(4), [-1]) + assert_equal(sol(0), [1]) + assert_equal(sol(-3), [1]) + assert_equal(sol(-4), [1]) + + assert_equal(sol([12, -5, 10, -3, 6, 1, 4]), + np.array([[-1, 1, -1, 1, -1, 1, -1]])) + + ts = np.array([1, 1]) + s = ConstantDenseOutput(1, 1, np.array([10])) + sol = OdeSolution(ts, [s]) + assert_equal(sol(0), [10]) + assert_equal(sol(1), [10]) + assert_equal(sol(2), [10]) + + assert_equal(sol([2, 1, 0]), np.array([[10, 10, 10]])) + + +def test_num_jac(): + def fun(t, y): + return np.vstack([ + -0.04 * y[0] + 1e4 * y[1] * y[2], + 0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] ** 2, + 3e7 * y[1] ** 2 + ]) + + def jac(t, y): + 
return np.array([ + [-0.04, 1e4 * y[2], 1e4 * y[1]], + [0.04, -1e4 * y[2] - 6e7 * y[1], -1e4 * y[1]], + [0, 6e7 * y[1], 0] + ]) + + t = 1 + y = np.array([1, 0, 0]) + J_true = jac(t, y) + threshold = 1e-5 + f = fun(t, y).ravel() + + J_num, factor = num_jac(fun, t, y, f, threshold, None) + assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) + + J_num, factor = num_jac(fun, t, y, f, threshold, factor) + assert_allclose(J_num, J_true, rtol=1e-5, atol=1e-5) + + +def test_num_jac_sparse(): + def fun(t, y): + e = y[1:]**3 - y[:-1]**2 + z = np.zeros(y.shape[1]) + return np.vstack((z, 3 * e)) + np.vstack((2 * e, z)) + + def structure(n): + A = np.zeros((n, n), dtype=int) + A[0, 0] = 1 + A[0, 1] = 1 + for i in range(1, n - 1): + A[i, i - 1: i + 2] = 1 + A[-1, -1] = 1 + A[-1, -2] = 1 + + return A + + np.random.seed(0) + n = 20 + y = np.random.randn(n) + A = structure(n) + groups = group_columns(A) + + f = fun(0, y[:, None]).ravel() + + # Compare dense and sparse results, assuming that dense implementation + # is correct (as it is straightforward). + J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, None, + sparsity=(A, groups)) + J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, None) + assert_allclose(J_num_dense, J_num_sparse.toarray(), + rtol=1e-12, atol=1e-14) + assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) + + # Take small factors to trigger their recomputing inside. + factor = np.random.uniform(0, 1e-12, size=n) + J_num_sparse, factor_sparse = num_jac(fun, 0, y.ravel(), f, 1e-8, factor, + sparsity=(A, groups)) + J_num_dense, factor_dense = num_jac(fun, 0, y.ravel(), f, 1e-8, factor) + + assert_allclose(J_num_dense, J_num_sparse.toarray(), + rtol=1e-12, atol=1e-14) + assert_allclose(factor_dense, factor_sparse, rtol=1e-12, atol=1e-14) + + +def test_args(): + + # sys3 is actually two decoupled systems. (x, y) form a + # linear oscillator, while z is a nonlinear first order + # system with equilibria at z=0 and z=1. 
If k > 0, z=1 + # is stable and z=0 is unstable. + + def sys3(t, w, omega, k, zfinal): + x, y, z = w + return [-omega*y, omega*x, k*z*(1 - z)] + + def sys3_jac(t, w, omega, k, zfinal): + x, y, z = w + J = np.array([[0, -omega, 0], + [omega, 0, 0], + [0, 0, k*(1 - 2*z)]]) + return J + + def sys3_x0decreasing(t, w, omega, k, zfinal): + x, y, z = w + return x + + def sys3_y0increasing(t, w, omega, k, zfinal): + x, y, z = w + return y + + def sys3_zfinal(t, w, omega, k, zfinal): + x, y, z = w + return z - zfinal + + # Set the event flags for the event functions. + sys3_x0decreasing.direction = -1 + sys3_y0increasing.direction = 1 + sys3_zfinal.terminal = True + + omega = 2 + k = 4 + + tfinal = 5 + zfinal = 0.99 + # Find z0 such that when z(0) = z0, z(tfinal) = zfinal. + # The condition z(tfinal) = zfinal is the terminal event. + z0 = np.exp(-k*tfinal)/((1 - zfinal)/zfinal + np.exp(-k*tfinal)) + + w0 = [0, -1, z0] + + # Provide the jac argument and use the Radau method to ensure that the use + # of the Jacobian function is exercised. + # If event handling is working, the solution will stop at tfinal, not tend. + tend = 2*tfinal + sol = solve_ivp(sys3, [0, tend], w0, + events=[sys3_x0decreasing, sys3_y0increasing, sys3_zfinal], + dense_output=True, args=(omega, k, zfinal), + method='Radau', jac=sys3_jac, + rtol=1e-10, atol=1e-13) + + # Check that we got the expected events at the expected times. + x0events_t = sol.t_events[0] + y0events_t = sol.t_events[1] + zfinalevents_t = sol.t_events[2] + assert_allclose(x0events_t, [0.5*np.pi, 1.5*np.pi]) + assert_allclose(y0events_t, [0.25*np.pi, 1.25*np.pi]) + assert_allclose(zfinalevents_t, [tfinal]) + + # Check that the solution agrees with the known exact solution. 
+ t = np.linspace(0, zfinalevents_t[0], 250) + w = sol.sol(t) + assert_allclose(w[0], np.sin(omega*t), rtol=1e-9, atol=1e-12) + assert_allclose(w[1], -np.cos(omega*t), rtol=1e-9, atol=1e-12) + assert_allclose(w[2], 1/(((1 - z0)/z0)*np.exp(-k*t) + 1), + rtol=1e-9, atol=1e-12) + + # Check that the state variables have the expected values at the events. + x0events = sol.sol(x0events_t) + y0events = sol.sol(y0events_t) + zfinalevents = sol.sol(zfinalevents_t) + assert_allclose(x0events[0], np.zeros_like(x0events[0]), atol=5e-14) + assert_allclose(x0events[1], np.ones_like(x0events[1])) + assert_allclose(y0events[0], np.ones_like(y0events[0])) + assert_allclose(y0events[1], np.zeros_like(y0events[1]), atol=5e-14) + assert_allclose(zfinalevents[2], [zfinal]) + + +@pytest.mark.thread_unsafe +def test_array_rtol(): + # solve_ivp had a bug with array_like `rtol`; see gh-15482 + # check that it's fixed + def f(t, y): + return y[0], y[1] + + # no warning (or error) when `rtol` is array_like + sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-1]) + err1 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1))) + + # warning when an element of `rtol` is too small + with pytest.warns(UserWarning, match="At least one element..."): + sol = solve_ivp(f, (0, 1), [1., 1.], rtol=[1e-1, 1e-16]) + err2 = np.abs(np.linalg.norm(sol.y[:, -1] - np.exp(1))) + + # tighter rtol improves the error + assert err2 < err1 + + +@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF', 'LSODA']) +def test_integration_zero_rhs(method, num_parallel_threads): + if method == 'LSODA' and num_parallel_threads > 1: + pytest.skip(reason='LSODA does not allow for concurrent execution') + + result = solve_ivp(fun_zero, [0, 10], np.ones(3), method=method) + assert_(result.success) + assert_equal(result.status, 0) + assert_allclose(result.y, 1.0, rtol=1e-15) + + +def test_args_single_value(): + def fun_with_arg(t, y, a): + return a*y + + message = "Supplied 'args' cannot be unpacked." 
+ with pytest.raises(TypeError, match=message): + solve_ivp(fun_with_arg, (0, 0.1), [1], args=-1) + + sol = solve_ivp(fun_with_arg, (0, 0.1), [1], args=(-1,)) + assert_allclose(sol.y[0, -1], np.exp(-0.1)) + + +@pytest.mark.parametrize("f0_fill", [np.nan, np.inf]) +def test_initial_state_finiteness(f0_fill): + # regression test for gh-17846 + msg = "All components of the initial state `y0` must be finite." + with pytest.raises(ValueError, match=msg): + solve_ivp(fun_zero, [0, 10], np.full(3, f0_fill)) + + +@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF']) +def test_zero_interval(method): + # Case where upper and lower limits of integration are the same + # Result of integration should match initial state. + # f[y(t)] = 2y(t) + def f(t, y): + return 2 * y + res = solve_ivp(f, (0.0, 0.0), np.array([1.0]), method=method) + assert res.success + assert_allclose(res.y[0, -1], 1.0) + + +@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF']) +def test_tbound_respected_small_interval(method): + """Regression test for gh-17341""" + SMALL = 1e-4 + + # f[y(t)] = 2y(t) on t in [0,SMALL] + # undefined otherwise + def f(t, y): + if t > SMALL: + raise ValueError("Function was evaluated outside interval") + return 2 * y + res = solve_ivp(f, (0.0, SMALL), np.array([1]), method=method) + assert res.success + + +@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF']) +def test_tbound_respected_larger_interval(method): + """Regression test for gh-8848""" + def V(r): + return -11/r + 10 * r / (0.05 + r**2) + + def func(t, p): + if t < -17 or t > 2: + raise ValueError("Function was evaluated outside interval") + P = p[0] + Q = p[1] + r = np.exp(t) + dPdr = r * Q + dQdr = -2.0 * r * ((-0.2 - V(r)) * P + 1 / r * Q) + return np.array([dPdr, dQdr]) + + result = solve_ivp(func, + (-17, 2), + y0=np.array([1, -11]), + max_step=0.03, + vectorized=False, + t_eval=None, + atol=1e-8, + rtol=1e-5) + assert result.success + + 
+@pytest.mark.parametrize('method', ['RK23', 'RK45', 'DOP853', 'Radau', 'BDF']) +def test_tbound_respected_oscillator(method): + "Regression test for gh-9198" + def reactions_func(t, y): + if (t > 205): + raise ValueError("Called outside interval") + yprime = np.array([1.73307544e-02, + 6.49376470e-06, + 0.00000000e+00, + 0.00000000e+00]) + return yprime + + def run_sim2(t_end, n_timepoints=10, shortest_delay_line=10000000): + init_state = np.array([134.08298555, 138.82348612, 100., 0.]) + t0 = 100.0 + t1 = 200.0 + return solve_ivp(reactions_func, + (t0, t1), + init_state.copy(), + dense_output=True, + max_step=t1 - t0) + result = run_sim2(1000, 100, 100) + assert result.success + + +def test_inital_maxstep(): + """Verify that select_inital_step respects max_step""" + rtol = 1e-3 + atol = 1e-6 + y0 = np.array([1/3, 2/9]) + for (t0, t_bound) in ((5, 9), (5, 1)): + for method_order in [RK23.error_estimator_order, + RK45.error_estimator_order, + DOP853.error_estimator_order, + 3, #RADAU + 1 #BDF + ]: + step_no_max = select_initial_step(fun_rational, t0, y0, t_bound, + np.inf, + fun_rational(t0,y0), + np.sign(t_bound - t0), + method_order, + rtol, atol) + max_step = step_no_max/2 + step_with_max = select_initial_step(fun_rational, t0, y0, t_bound, + max_step, + fun_rational(t0, y0), + np.sign(t_bound - t0), + method_order, + rtol, atol) + assert_equal(max_step, step_with_max) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py new file mode 100644 index 0000000000000000000000000000000000000000..33cb27d0323d037c0937ab94b4de8f63b46be3d7 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ivp/tests/test_rk.py @@ -0,0 +1,37 @@ +import pytest +from numpy.testing import assert_allclose, assert_ +import numpy as np +from scipy.integrate import RK23, RK45, DOP853 +from scipy.integrate._ivp import dop853_coefficients + + 
@pytest.mark.parametrize("solver", [RK23, RK45, DOP853])
def test_coefficient_properties(solver):
    # Standard consistency conditions for a Runge-Kutta tableau:
    # the weights B sum to 1, and each row of A sums to the node C.
    assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
    assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-14)


def test_coefficient_properties_dop853():
    # Same tableau consistency checks for the raw DOP853 coefficient tables.
    A = dop853_coefficients.A
    B = dop853_coefficients.B
    C = dop853_coefficients.C
    assert_allclose(B.sum(), 1, rtol=1e-15)
    assert_allclose(A.sum(axis=1), C, rtol=1e-14)


@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
def test_error_estimation(solver_class):
    # One step on y' = y from y(0) = 1; the embedded error estimate
    # must bound the true local error against exp(step).
    step = 0.2
    solver = solver_class(lambda t, y: y, 0, [1], 1, first_step=step)
    solver.step()
    estimate = solver._estimate_error(solver.K, step)
    true_error = solver.y - np.exp([step])
    assert_(np.abs(true_error) < np.abs(estimate))


@pytest.mark.parametrize("solver_class", [RK23, RK45, DOP853])
def test_error_estimation_complex(solver_class):
    # With a complex-valued state, the error norm must still be real.
    h = 0.2
    solver = solver_class(lambda t, y: 1j * y, 0, [1j], 1, first_step=h)
    solver.step()
    err_norm = solver._estimate_error_norm(solver.K, h, scale=[1])
    assert np.isrealobj(err_norm)

# diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lebedev.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lebedev.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..da200972f9d475162f84294ed335149dc86fe94b
# --- /dev/null
# +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lebedev.py
# @@ -0,0 +1,5450 @@
# getLebedevSphere
# Copyright (c) 2010, Robert Parrish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the distribution +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Brainlessly translated to Python + +import numpy as np +from numpy import pi, zeros, sqrt + + +__all__ = ['lebedev_rule'] + + +def get_lebedev_sphere(degree): + # getLebedevSphere + # @author Rob Parrish, The Sherrill Group, CCMST Georgia Tech + # @email robparrish@gmail.com + # @date 03/24/2010 + # + # @description - function to compute normalized points and weights + # for Lebedev quadratures on the surface of the unit sphere at double precision. + # **********Relative error is generally expected to be ~2.0E-14 [1]******** + # Lebedev quadratures are superbly accurate and efficient quadrature rules for + # approximating integrals of the form $v = \iint_{4\pi} f(\Omega) \ \ud + # \Omega$, where $\Omega is the solid angle on the surface of the unit + # sphere. Lebedev quadratures integrate all spherical harmonics up to $l = + # order$, where $degree \approx order(order+1)/3$. These grids may be easily + # combined with radial quadratures to provide robust cubature formulae. 
For + # example, see 'A. Becke, 1988c, J. Chem. Phys., 88(4), pp. 2547' (The first + # paper on tractable molecular Density Functional Theory methods, of which + # Lebedev grids and numerical cubature are an intrinsic part). + # + # @param degree - positive integer specifying number of points in the + # requested quadrature. Allowed values are (degree -> order): + # degree: { 6, 14, 26, 38, 50, 74, 86, 110, 146, 170, 194, 230, 266, 302, + # 350, 434, 590, 770, 974, 1202, 1454, 1730, 2030, 2354, 2702, 3074, + # 3470, 3890, 4334, 4802, 5294, 5810 } + # order: {3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,35,41,47,53,59,65,71,77, + # 83,89,95,101,107,113,119,125,131} + # + # + # @return leb_tmp - struct containing fields: + # x - x values of quadrature, constrained to unit sphere + # y - y values of quadrature, constrained to unit sphere + # z - z values of quadrature, constrained to unit sphere + # w - quadrature weights, normalized to $4\pi$. + # + # @example: $\int_S x^2+y^2-z^2 \ud \Omega = 4.188790204786399$ + # f = @(x,y,z) x.^2+y.^2-z.^2 + # leb = getLebedevSphere(590) + # v = f(leb.x,leb.y,leb.z) + # int = sum(v.*leb.w) + # + # @citation - Translated from a Fortran code kindly provided by Christoph van + # Wuellen (Ruhr-Universitaet, Bochum, Germany), which in turn came from the + # original C routines coded by Dmitri Laikov (Moscow State University, + # Moscow, Russia). The MATLAB implementation of this code is designed for + # benchmarking of new DFT integration techniques to be implemented in the + # open source Psi4 ab initio quantum chemistry program. + # + # As per Professor Wuellen's request, any papers published using this code + # or its derivatives are requested to include the following citation: + # + # [1] V.I. Lebedev, and D.N. Laikov + # "A quadrature formula for the sphere of the 131st + # algebraic order of accuracy" + # Doklady Mathematics, Vol. 59, No. 3, 1999, pp. 477-481. 
+ + class Leb: + x, y, z, w = None, None, None, None + + leb_tmp = Leb() + + leb_tmp.x = zeros(degree) + leb_tmp.y = zeros(degree) + leb_tmp.z = zeros(degree) + leb_tmp.w = zeros(degree) + + start = 0 + a = 0.0 + b = 0.0 + + match degree: + + case 6: + + v = 0.1666666666666667E+0 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + + case 14: + + v = 0.6666666666666667E-1 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.7500000000000000E-1 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + + case 26: + + v = 0.4761904761904762E-1 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.3809523809523810E-1 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.3214285714285714E-1 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + + case 38: + + v = 0.9523809523809524E-2 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.3214285714285714E-1 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.4597008433809831E+0 + v = 0.2857142857142857E-1 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + + case 50: + + v = 0.1269841269841270E-1 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.2257495590828924E-1 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.2109375000000000E-1 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.3015113445777636E+0 + v = 0.2017333553791887E-1 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + + case 74: + + v = 0.5130671797338464E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.1660406956574204E-1 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = -0.2958603896103896E-1 + leb_tmp, 
start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.4803844614152614E+0 + v = 0.2657620708215946E-1 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3207726489807764E+0 + v = 0.1652217099371571E-1 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + + case 86: + + v = 0.1154401154401154E-1 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.1194390908585628E-1 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.3696028464541502E+0 + v = 0.1111055571060340E-1 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6943540066026664E+0 + v = 0.1187650129453714E-1 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3742430390903412E+0 + v = 0.1181230374690448E-1 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + + case 110: + + v = 0.3828270494937162E-2 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.9793737512487512E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.1851156353447362E+0 + v = 0.8211737283191111E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6904210483822922E+0 + v = 0.9942814891178103E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3956894730559419E+0 + v = 0.9595471336070963E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4783690288121502E+0 + v = 0.9694996361663028E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + + case 146: + + v = 0.5996313688621381E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.7372999718620756E-2 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.7210515360144488E-2 + leb_tmp, start = 
get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.6764410400114264E+0 + v = 0.7116355493117555E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4174961227965453E+0 + v = 0.6753829486314477E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1574676672039082E+0 + v = 0.7574394159054034E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1403553811713183E+0 + b = 0.4493328323269557E+0 + v = 0.6991087353303262E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 170: + + v = 0.5544842902037365E-2 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.6071332770670752E-2 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.6383674773515093E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2551252621114134E+0 + v = 0.5183387587747790E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6743601460362766E+0 + v = 0.6317929009813725E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4318910696719410E+0 + v = 0.6201670006589077E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2613931360335988E+0 + v = 0.5477143385137348E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4990453161796037E+0 + b = 0.1446630744325115E+0 + v = 0.5968383987681156E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 194: + + v = 0.1782340447244611E-2 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.5716905949977102E-2 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.5573383178848738E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 
0.6712973442695226E+0 + v = 0.5608704082587997E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2892465627575439E+0 + v = 0.5158237711805383E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4446933178717437E+0 + v = 0.5518771467273614E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1299335447650067E+0 + v = 0.4106777028169394E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3457702197611283E+0 + v = 0.5051846064614808E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1590417105383530E+0 + b = 0.8360360154824589E+0 + v = 0.5530248916233094E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 230: + + v = -0.5522639919727325E-1 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.4450274607445226E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.4492044687397611E+0 + v = 0.4496841067921404E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2520419490210201E+0 + v = 0.5049153450478750E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6981906658447242E+0 + v = 0.3976408018051883E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6587405243460960E+0 + v = 0.4401400650381014E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4038544050097660E-1 + v = 0.1724544350544401E-1 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5823842309715585E+0 + v = 0.4231083095357343E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3545877390518688E+0 + v = 0.5198069864064399E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 
0.2272181808998187E+0 + b = 0.4864661535886647E+0 + v = 0.4695720972568883E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 266: + + v = -0.1313769127326952E-2 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = -0.2522728704859336E-2 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.4186853881700583E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.7039373391585475E+0 + v = 0.5315167977810885E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1012526248572414E+0 + v = 0.4047142377086219E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4647448726420539E+0 + v = 0.4112482394406990E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3277420654971629E+0 + v = 0.3595584899758782E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6620338663699974E+0 + v = 0.4256131351428158E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8506508083520399E+0 + v = 0.4229582700647240E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3233484542692899E+0 + b = 0.1153112011009701E+0 + v = 0.4080914225780505E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2314790158712601E+0 + b = 0.5244939240922365E+0 + v = 0.4071467593830964E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 302: + + v = 0.8545911725128148E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.3599119285025571E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.3515640345570105E+0 + v = 0.3449788424305883E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6566329410219612E+0 + v 
= 0.3604822601419882E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4729054132581005E+0 + v = 0.3576729661743367E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.9618308522614784E-1 + v = 0.2352101413689164E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2219645236294178E+0 + v = 0.3108953122413675E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7011766416089545E+0 + v = 0.3650045807677255E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2644152887060663E+0 + v = 0.2982344963171804E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5718955891878961E+0 + v = 0.3600820932216460E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2510034751770465E+0 + b = 0.8000727494073952E+0 + v = 0.3571540554273387E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1233548532583327E+0 + b = 0.4127724083168531E+0 + v = 0.3392312205006170E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 350: + + v = 0.3006796749453936E-2 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.3050627745650771E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.7068965463912316E+0 + v = 0.1621104600288991E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4794682625712025E+0 + v = 0.3005701484901752E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1927533154878019E+0 + v = 0.2990992529653774E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6930357961327123E+0 + v = 0.2982170644107595E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 
0.3608302115520091E+0 + v = 0.2721564237310992E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6498486161496169E+0 + v = 0.3033513795811141E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1932945013230339E+0 + v = 0.3007949555218533E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3800494919899303E+0 + v = 0.2881964603055307E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2899558825499574E+0 + b = 0.7934537856582316E+0 + v = 0.2958357626535696E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9684121455103957E-1 + b = 0.8280801506686862E+0 + v = 0.3036020026407088E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1833434647041659E+0 + b = 0.9074658265305127E+0 + v = 0.2832187403926303E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 434: + + v = 0.5265897968224436E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.2548219972002607E-2 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.2512317418927307E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.6909346307509111E+0 + v = 0.2530403801186355E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1774836054609158E+0 + v = 0.2014279020918528E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4914342637784746E+0 + v = 0.2501725168402936E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6456664707424256E+0 + v = 0.2513267174597564E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2861289010307638E+0 + v = 0.2302694782227416E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, 
leb_tmp) + a = 0.7568084367178018E-1 + v = 0.1462495621594614E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3927259763368002E+0 + v = 0.2445373437312980E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8818132877794288E+0 + v = 0.2417442375638981E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9776428111182649E+0 + v = 0.1910951282179532E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2054823696403044E+0 + b = 0.8689460322872412E+0 + v = 0.2416930044324775E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5905157048925271E+0 + b = 0.7999278543857286E+0 + v = 0.2512236854563495E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5550152361076807E+0 + b = 0.7717462626915901E+0 + v = 0.2496644054553086E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9371809858553722E+0 + b = 0.3344363145343455E+0 + v = 0.2236607760437849E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 590: + + v = 0.3095121295306187E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.1852379698597489E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.7040954938227469E+0 + v = 0.1871790639277744E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6807744066455243E+0 + v = 0.1858812585438317E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6372546939258752E+0 + v = 0.1852028828296213E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5044419707800358E+0 + v = 0.1846715956151242E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4215761784010967E+0 + v = 0.1818471778162769E-2 
+ leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3317920736472123E+0 + v = 0.1749564657281154E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2384736701421887E+0 + v = 0.1617210647254411E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1459036449157763E+0 + v = 0.1384737234851692E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6095034115507196E-1 + v = 0.9764331165051050E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6116843442009876E+0 + v = 0.1857161196774078E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3964755348199858E+0 + v = 0.1705153996395864E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1724782009907724E+0 + v = 0.1300321685886048E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5610263808622060E+0 + b = 0.3518280927733519E+0 + v = 0.1842866472905286E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4742392842551980E+0 + b = 0.2634716655937950E+0 + v = 0.1802658934377451E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5984126497885380E+0 + b = 0.1816640840360209E+0 + v = 0.1849830560443660E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3791035407695563E+0 + b = 0.1720795225656878E+0 + v = 0.1713904507106709E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2778673190586244E+0 + b = 0.8213021581932511E-1 + v = 0.1555213603396808E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5033564271075117E+0 + b = 0.8999205842074875E-1 + v = 0.1802239128008525E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 770: + + v = 
0.2192942088181184E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.1436433617319080E-2 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.1421940344335877E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.5087204410502360E-1 + v = 0.6798123511050502E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1228198790178831E+0 + v = 0.9913184235294912E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2026890814408786E+0 + v = 0.1180207833238949E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2847745156464294E+0 + v = 0.1296599602080921E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3656719078978026E+0 + v = 0.1365871427428316E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4428264886713469E+0 + v = 0.1402988604775325E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5140619627249735E+0 + v = 0.1418645563595609E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6306401219166803E+0 + v = 0.1421376741851662E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6716883332022612E+0 + v = 0.1423996475490962E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6979792685336881E+0 + v = 0.1431554042178567E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1446865674195309E+0 + v = 0.9254401499865368E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3390263475411216E+0 + v = 0.1250239995053509E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5335804651263506E+0 + v = 0.1394365843329230E-2 + leb_tmp, start = 
get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6944024393349413E-1 + b = 0.2355187894242326E+0 + v = 0.1127089094671749E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2269004109529460E+0 + b = 0.4102182474045730E+0 + v = 0.1345753760910670E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8025574607775339E-1 + b = 0.6214302417481605E+0 + v = 0.1424957283316783E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1467999527896572E+0 + b = 0.3245284345717394E+0 + v = 0.1261523341237750E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1571507769824727E+0 + b = 0.5224482189696630E+0 + v = 0.1392547106052696E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2365702993157246E+0 + b = 0.6017546634089558E+0 + v = 0.1418761677877656E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7714815866765732E-1 + b = 0.4346575516141163E+0 + v = 0.1338366684479554E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3062936666210730E+0 + b = 0.4908826589037616E+0 + v = 0.1393700862676131E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3822477379524787E+0 + b = 0.5648768149099500E+0 + v = 0.1415914757466932E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 974: + + v = 0.1438294190527431E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.1125772288287004E-2 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.4292963545341347E-1 + v = 0.4948029341949241E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1051426854086404E+0 + v = 0.7357990109125470E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) 
+ a = 0.1750024867623087E+0 + v = 0.8889132771304384E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2477653379650257E+0 + v = 0.9888347838921435E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3206567123955957E+0 + v = 0.1053299681709471E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3916520749849983E+0 + v = 0.1092778807014578E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4590825874187624E+0 + v = 0.1114389394063227E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5214563888415861E+0 + v = 0.1123724788051555E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6253170244654199E+0 + v = 0.1125239325243814E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6637926744523170E+0 + v = 0.1126153271815905E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6910410398498301E+0 + v = 0.1130286931123841E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7052907007457760E+0 + v = 0.1134986534363955E-2 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1236686762657990E+0 + v = 0.6823367927109931E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2940777114468387E+0 + v = 0.9454158160447096E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4697753849207649E+0 + v = 0.1074429975385679E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6334563241139567E+0 + v = 0.1129300086569132E-2 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5974048614181342E-1 + b = 0.2029128752777523E+0 + v = 0.8436884500901954E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, 
start, a, b, v, leb_tmp) + a = 0.1375760408473636E+0 + b = 0.4602621942484054E+0 + v = 0.1075255720448885E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3391016526336286E+0 + b = 0.5030673999662036E+0 + v = 0.1108577236864462E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1271675191439820E+0 + b = 0.2817606422442134E+0 + v = 0.9566475323783357E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2693120740413512E+0 + b = 0.4331561291720157E+0 + v = 0.1080663250717391E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1419786452601918E+0 + b = 0.6256167358580814E+0 + v = 0.1126797131196295E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6709284600738255E-1 + b = 0.3798395216859157E+0 + v = 0.1022568715358061E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7057738183256172E-1 + b = 0.5517505421423520E+0 + v = 0.1108960267713108E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2783888477882155E+0 + b = 0.6029619156159187E+0 + v = 0.1122790653435766E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1979578938917407E+0 + b = 0.3589606329589096E+0 + v = 0.1032401847117460E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2087307061103274E+0 + b = 0.5348666438135476E+0 + v = 0.1107249382283854E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4055122137872836E+0 + b = 0.5674997546074373E+0 + v = 0.1121780048519972E-2 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 1202: + + v = 0.1105189233267572E-3 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.9205232738090741E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, 
start, a, b, v, leb_tmp) + v = 0.9133159786443561E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.3712636449657089E-1 + v = 0.3690421898017899E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.9140060412262223E-1 + v = 0.5603990928680660E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1531077852469906E+0 + v = 0.6865297629282609E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2180928891660612E+0 + v = 0.7720338551145630E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2839874532200175E+0 + v = 0.8301545958894795E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3491177600963764E+0 + v = 0.8686692550179628E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4121431461444309E+0 + v = 0.8927076285846890E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4718993627149127E+0 + v = 0.9060820238568219E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5273145452842337E+0 + v = 0.9119777254940867E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6209475332444019E+0 + v = 0.9128720138604181E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6569722711857291E+0 + v = 0.9130714935691735E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6841788309070143E+0 + v = 0.9152873784554116E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7012604330123631E+0 + v = 0.9187436274321654E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1072382215478166E+0 + v = 0.5176977312965694E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 
0.2582068959496968E+0 + v = 0.7331143682101417E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4172752955306717E+0 + v = 0.8463232836379928E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5700366911792503E+0 + v = 0.9031122694253992E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9827986018263947E+0 + b = 0.1771774022615325E+0 + v = 0.6485778453163257E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9624249230326228E+0 + b = 0.2475716463426288E+0 + v = 0.7435030910982369E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9402007994128811E+0 + b = 0.3354616289066489E+0 + v = 0.7998527891839054E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9320822040143202E+0 + b = 0.3173615246611977E+0 + v = 0.8101731497468018E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9043674199393299E+0 + b = 0.4090268427085357E+0 + v = 0.8483389574594331E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8912407560074747E+0 + b = 0.3854291150669224E+0 + v = 0.8556299257311812E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8676435628462708E+0 + b = 0.4932221184851285E+0 + v = 0.8803208679738260E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8581979986041619E+0 + b = 0.4785320675922435E+0 + v = 0.8811048182425720E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8396753624049856E+0 + b = 0.4507422593157064E+0 + v = 0.8850282341265444E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8165288564022188E+0 + b = 0.5632123020762100E+0 + v = 0.9021342299040653E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, 
leb_tmp) + a = 0.8015469370783529E+0 + b = 0.5434303569693900E+0 + v = 0.9010091677105086E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7773563069070351E+0 + b = 0.5123518486419871E+0 + v = 0.9022692938426915E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7661621213900394E+0 + b = 0.6394279634749102E+0 + v = 0.9158016174693465E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7553584143533510E+0 + b = 0.6269805509024392E+0 + v = 0.9131578003189435E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7344305757559503E+0 + b = 0.6031161693096310E+0 + v = 0.9107813579482705E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.7043837184021765E+0 + b = 0.5693702498468441E+0 + v = 0.9105760258970126E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 1454: + + v = 0.7777160743261247E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.7557646413004701E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.3229290663413854E-1 + v = 0.2841633806090617E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8036733271462222E-1 + v = 0.4374419127053555E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1354289960531653E+0 + v = 0.5417174740872172E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1938963861114426E+0 + v = 0.6148000891358593E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2537343715011275E+0 + v = 0.6664394485800705E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3135251434752570E+0 + v = 0.7025039356923220E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) 
+ a = 0.3721558339375338E+0 + v = 0.7268511789249627E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4286809575195696E+0 + v = 0.7422637534208629E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4822510128282994E+0 + v = 0.7509545035841214E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5320679333566263E+0 + v = 0.7548535057718401E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6172998195394274E+0 + v = 0.7554088969774001E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6510679849127481E+0 + v = 0.7553147174442808E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6777315251687360E+0 + v = 0.7564767653292297E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6963109410648741E+0 + v = 0.7587991808518730E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7058935009831749E+0 + v = 0.7608261832033027E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.9955546194091857E+0 + v = 0.4021680447874916E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9734115901794209E+0 + v = 0.5804871793945964E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9275693732388626E+0 + v = 0.6792151955945159E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.8568022422795103E+0 + v = 0.7336741211286294E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.7623495553719372E+0 + v = 0.7581866300989608E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5707522908892223E+0 + b = 0.4387028039889501E+0 + v = 0.7538257859800743E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, 
start, a, b, v, leb_tmp) + a = 0.5196463388403083E+0 + b = 0.3858908414762617E+0 + v = 0.7483517247053123E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4646337531215351E+0 + b = 0.3301937372343854E+0 + v = 0.7371763661112059E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4063901697557691E+0 + b = 0.2725423573563777E+0 + v = 0.7183448895756934E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3456329466643087E+0 + b = 0.2139510237495250E+0 + v = 0.6895815529822191E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2831395121050332E+0 + b = 0.1555922309786647E+0 + v = 0.6480105801792886E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2197682022925330E+0 + b = 0.9892878979686097E-1 + v = 0.5897558896594636E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1564696098650355E+0 + b = 0.4598642910675510E-1 + v = 0.5095708849247346E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6027356673721295E+0 + b = 0.3376625140173426E+0 + v = 0.7536906428909755E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5496032320255096E+0 + b = 0.2822301309727988E+0 + v = 0.7472505965575118E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4921707755234567E+0 + b = 0.2248632342592540E+0 + v = 0.7343017132279698E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4309422998598483E+0 + b = 0.1666224723456479E+0 + v = 0.7130871582177445E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3664108182313672E+0 + b = 0.1086964901822169E+0 + v = 0.6817022032112776E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2990189057758436E+0 + b = 
0.5251989784120085E-1 + v = 0.6380941145604121E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6268724013144998E+0 + b = 0.2297523657550023E+0 + v = 0.7550381377920310E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5707324144834607E+0 + b = 0.1723080607093800E+0 + v = 0.7478646640144802E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5096360901960365E+0 + b = 0.1140238465390513E+0 + v = 0.7335918720601220E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4438729938312456E+0 + b = 0.5611522095882537E-1 + v = 0.7110120527658118E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6419978471082389E+0 + b = 0.1164174423140873E+0 + v = 0.7571363978689501E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5817218061802611E+0 + b = 0.5797589531445219E-1 + v = 0.7489908329079234E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 1730: + + v = 0.6309049437420976E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.6398287705571748E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.6357185073530720E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2860923126194662E-1 + v = 0.2221207162188168E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7142556767711522E-1 + v = 0.3475784022286848E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1209199540995559E+0 + v = 0.4350742443589804E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1738673106594379E+0 + v = 0.4978569136522127E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2284645438467734E+0 + v = 
0.5435036221998053E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2834807671701512E+0 + v = 0.5765913388219542E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3379680145467339E+0 + v = 0.6001200359226003E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3911355454819537E+0 + v = 0.6162178172717512E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4422860353001403E+0 + v = 0.6265218152438485E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4907781568726057E+0 + v = 0.6323987160974212E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5360006153211468E+0 + v = 0.6350767851540569E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6142105973596603E+0 + v = 0.6354362775297107E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6459300387977504E+0 + v = 0.6352302462706235E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6718056125089225E+0 + v = 0.6358117881417972E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6910888533186254E+0 + v = 0.6373101590310117E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7030467416823252E+0 + v = 0.6390428961368665E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8354951166354646E-1 + v = 0.3186913449946576E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2050143009099486E+0 + v = 0.4678028558591711E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3370208290706637E+0 + v = 0.5538829697598626E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4689051484233963E+0 + v = 
0.6044475907190476E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5939400424557334E+0 + v = 0.6313575103509012E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1394983311832261E+0 + b = 0.4097581162050343E-1 + v = 0.4078626431855630E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1967999180485014E+0 + b = 0.8851987391293348E-1 + v = 0.4759933057812725E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2546183732548967E+0 + b = 0.1397680182969819E+0 + v = 0.5268151186413440E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3121281074713875E+0 + b = 0.1929452542226526E+0 + v = 0.5643048560507316E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3685981078502492E+0 + b = 0.2467898337061562E+0 + v = 0.5914501076613073E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4233760321547856E+0 + b = 0.3003104124785409E+0 + v = 0.6104561257874195E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4758671236059246E+0 + b = 0.3526684328175033E+0 + v = 0.6230252860707806E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5255178579796463E+0 + b = 0.4031134861145713E+0 + v = 0.6305618761760796E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5718025633734589E+0 + b = 0.4509426448342351E+0 + v = 0.6343092767597889E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2686927772723415E+0 + b = 0.4711322502423248E-1 + v = 0.5176268945737826E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3306006819904809E+0 + b = 0.9784487303942695E-1 + v = 0.5564840313313692E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, 
leb_tmp) + a = 0.3904906850594983E+0 + b = 0.1505395810025273E+0 + v = 0.5856426671038980E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4479957951904390E+0 + b = 0.2039728156296050E+0 + v = 0.6066386925777091E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5027076848919780E+0 + b = 0.2571529941121107E+0 + v = 0.6208824962234458E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5542087392260217E+0 + b = 0.3092191375815670E+0 + v = 0.6296314297822907E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6020850887375187E+0 + b = 0.3593807506130276E+0 + v = 0.6340423756791859E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4019851409179594E+0 + b = 0.5063389934378671E-1 + v = 0.5829627677107342E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4635614567449800E+0 + b = 0.1032422269160612E+0 + v = 0.6048693376081110E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5215860931591575E+0 + b = 0.1566322094006254E+0 + v = 0.6202362317732461E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5758202499099271E+0 + b = 0.2098082827491099E+0 + v = 0.6299005328403779E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6259893683876795E+0 + b = 0.2618824114553391E+0 + v = 0.6347722390609353E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5313795124811891E+0 + b = 0.5263245019338556E-1 + v = 0.6203778981238834E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5893317955931995E+0 + b = 0.1061059730982005E+0 + v = 0.6308414671239979E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6426246321215801E+0 + b = 0.1594171564034221E+0 + 
v = 0.6362706466959498E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6511904367376113E+0 + b = 0.5354789536565540E-1 + v = 0.6375414170333233E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 2030: + + v = 0.4656031899197431E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.5421549195295507E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2540835336814348E-1 + v = 0.1778522133346553E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6399322800504915E-1 + v = 0.2811325405682796E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1088269469804125E+0 + v = 0.3548896312631459E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1570670798818287E+0 + v = 0.4090310897173364E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2071163932282514E+0 + v = 0.4493286134169965E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2578914044450844E+0 + v = 0.4793728447962723E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3085687558169623E+0 + v = 0.5015415319164265E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3584719706267024E+0 + v = 0.5175127372677937E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4070135594428709E+0 + v = 0.5285522262081019E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4536618626222638E+0 + v = 0.5356832703713962E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4979195686463577E+0 + v = 0.5397914736175170E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5393075111126999E+0 + v = 
0.5416899441599930E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6115617676843916E+0 + v = 0.5419308476889938E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6414308435160159E+0 + v = 0.5416936902030596E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6664099412721607E+0 + v = 0.5419544338703164E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6859161771214913E+0 + v = 0.5428983656630975E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6993625593503890E+0 + v = 0.5442286500098193E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7062393387719380E+0 + v = 0.5452250345057301E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7479028168349763E-1 + v = 0.2568002497728530E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1848951153969366E+0 + v = 0.3827211700292145E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3059529066581305E+0 + v = 0.4579491561917824E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4285556101021362E+0 + v = 0.5042003969083574E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5468758653496526E+0 + v = 0.5312708889976025E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6565821978343439E+0 + v = 0.5438401790747117E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1253901572367117E+0 + b = 0.3681917226439641E-1 + v = 0.3316041873197344E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1775721510383941E+0 + b = 0.7982487607213301E-1 + v = 0.3899113567153771E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, 
a, b, v, leb_tmp) + a = 0.2305693358216114E+0 + b = 0.1264640966592335E+0 + v = 0.4343343327201309E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2836502845992063E+0 + b = 0.1751585683418957E+0 + v = 0.4679415262318919E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3361794746232590E+0 + b = 0.2247995907632670E+0 + v = 0.4930847981631031E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3875979172264824E+0 + b = 0.2745299257422246E+0 + v = 0.5115031867540091E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4374019316999074E+0 + b = 0.3236373482441118E+0 + v = 0.5245217148457367E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4851275843340022E+0 + b = 0.3714967859436741E+0 + v = 0.5332041499895321E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5303391803806868E+0 + b = 0.4175353646321745E+0 + v = 0.5384583126021542E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5726197380596287E+0 + b = 0.4612084406355461E+0 + v = 0.5411067210798852E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2431520732564863E+0 + b = 0.4258040133043952E-1 + v = 0.4259797391468714E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3002096800895869E+0 + b = 0.8869424306722721E-1 + v = 0.4604931368460021E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3558554457457432E+0 + b = 0.1368811706510655E+0 + v = 0.4871814878255202E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4097782537048887E+0 + b = 0.1860739985015033E+0 + v = 0.5072242910074885E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4616337666067458E+0 + b = 
0.2354235077395853E+0 + v = 0.5217069845235350E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5110707008417874E+0 + b = 0.2842074921347011E+0 + v = 0.5315785966280310E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5577415286163795E+0 + b = 0.3317784414984102E+0 + v = 0.5376833708758905E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6013060431366950E+0 + b = 0.3775299002040700E+0 + v = 0.5408032092069521E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3661596767261781E+0 + b = 0.4599367887164592E-1 + v = 0.4842744917904866E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4237633153506581E+0 + b = 0.9404893773654421E-1 + v = 0.5048926076188130E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4786328454658452E+0 + b = 0.1431377109091971E+0 + v = 0.5202607980478373E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5305702076789774E+0 + b = 0.1924186388843570E+0 + v = 0.5309932388325743E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5793436224231788E+0 + b = 0.2411590944775190E+0 + v = 0.5377419770895208E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6247069017094747E+0 + b = 0.2886871491583605E+0 + v = 0.5411696331677717E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4874315552535204E+0 + b = 0.4804978774953206E-1 + v = 0.5197996293282420E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5427337322059053E+0 + b = 0.9716857199366665E-1 + v = 0.5311120836622945E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5943493747246700E+0 + b = 0.1465205839795055E+0 + v = 0.5384309319956951E-3 + leb_tmp, start 
= get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6421314033564943E+0 + b = 0.1953579449803574E+0 + v = 0.5421859504051886E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6020628374713980E+0 + b = 0.4916375015738108E-1 + v = 0.5390948355046314E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6529222529856881E+0 + b = 0.9861621540127005E-1 + v = 0.5433312705027845E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 2354: + + v = 0.3922616270665292E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.4703831750854424E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.4678202801282136E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2290024646530589E-1 + v = 0.1437832228979900E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5779086652271284E-1 + v = 0.2303572493577644E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.9863103576375984E-1 + v = 0.2933110752447454E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1428155792982185E+0 + v = 0.3402905998359838E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1888978116601463E+0 + v = 0.3759138466870372E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2359091682970210E+0 + v = 0.4030638447899798E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2831228833706171E+0 + v = 0.4236591432242211E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3299495857966693E+0 + v = 0.4390522656946746E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3758840802660796E+0 + v = 0.4502523466626247E-3 + 
leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4204751831009480E+0 + v = 0.4580577727783541E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4633068518751051E+0 + v = 0.4631391616615899E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5039849474507313E+0 + v = 0.4660928953698676E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5421265793440747E+0 + v = 0.4674751807936953E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6092660230557310E+0 + v = 0.4676414903932920E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6374654204984869E+0 + v = 0.4674086492347870E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6615136472609892E+0 + v = 0.4674928539483207E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6809487285958127E+0 + v = 0.4680748979686447E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6952980021665196E+0 + v = 0.4690449806389040E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7041245497695400E+0 + v = 0.4699877075860818E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6744033088306065E-1 + v = 0.2099942281069176E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1678684485334166E+0 + v = 0.3172269150712804E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2793559049539613E+0 + v = 0.3832051358546523E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3935264218057639E+0 + v = 0.4252193818146985E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5052629268232558E+0 + v = 0.4513807963755000E-3 + 
leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6107905315437531E+0 + v = 0.4657797469114178E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1135081039843524E+0 + b = 0.3331954884662588E-1 + v = 0.2733362800522836E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1612866626099378E+0 + b = 0.7247167465436538E-1 + v = 0.3235485368463559E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2100786550168205E+0 + b = 0.1151539110849745E+0 + v = 0.3624908726013453E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2592282009459942E+0 + b = 0.1599491097143677E+0 + v = 0.3925540070712828E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3081740561320203E+0 + b = 0.2058699956028027E+0 + v = 0.4156129781116235E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3564289781578164E+0 + b = 0.2521624953502911E+0 + v = 0.4330644984623263E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4035587288240703E+0 + b = 0.2982090785797674E+0 + v = 0.4459677725921312E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4491671196373903E+0 + b = 0.3434762087235733E+0 + v = 0.4551593004456795E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4928854782917489E+0 + b = 0.3874831357203437E+0 + v = 0.4613341462749918E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5343646791958988E+0 + b = 0.4297814821746926E+0 + v = 0.4651019618269806E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5732683216530990E+0 + b = 0.4699402260943537E+0 + v = 0.4670249536100625E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.2214131583218986E+0 + b = 0.3873602040643895E-1 + v = 0.3549555576441708E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2741796504750071E+0 + b = 0.8089496256902013E-1 + v = 0.3856108245249010E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3259797439149485E+0 + b = 0.1251732177620872E+0 + v = 0.4098622845756882E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3765441148826891E+0 + b = 0.1706260286403185E+0 + v = 0.4286328604268950E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4255773574530558E+0 + b = 0.2165115147300408E+0 + v = 0.4427802198993945E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4727795117058430E+0 + b = 0.2622089812225259E+0 + v = 0.4530473511488561E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5178546895819012E+0 + b = 0.3071721431296201E+0 + v = 0.4600805475703138E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5605141192097460E+0 + b = 0.3508998998801138E+0 + v = 0.4644599059958017E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6004763319352512E+0 + b = 0.3929160876166931E+0 + v = 0.4667274455712508E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3352842634946949E+0 + b = 0.4202563457288019E-1 + v = 0.4069360518020356E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3891971629814670E+0 + b = 0.8614309758870850E-1 + v = 0.4260442819919195E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4409875565542281E+0 + b = 0.1314500879380001E+0 + v = 0.4408678508029063E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4904893058592484E+0 + b = 0.1772189657383859E+0 + v = 
0.4518748115548597E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5375056138769549E+0 + b = 0.2228277110050294E+0 + v = 0.4595564875375116E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5818255708669969E+0 + b = 0.2677179935014386E+0 + v = 0.4643988774315846E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6232334858144959E+0 + b = 0.3113675035544165E+0 + v = 0.4668827491646946E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4489485354492058E+0 + b = 0.4409162378368174E-1 + v = 0.4400541823741973E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5015136875933150E+0 + b = 0.8939009917748489E-1 + v = 0.4514512890193797E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5511300550512623E+0 + b = 0.1351806029383365E+0 + v = 0.4596198627347549E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5976720409858000E+0 + b = 0.1808370355053196E+0 + v = 0.4648659016801781E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6409956378989354E+0 + b = 0.2257852192301602E+0 + v = 0.4675502017157673E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5581222330827514E+0 + b = 0.4532173421637160E-1 + v = 0.4598494476455523E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6074705984161695E+0 + b = 0.9117488031840314E-1 + v = 0.4654916955152048E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6532272537379033E+0 + b = 0.1369294213140155E+0 + v = 0.4684709779505137E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6594761494500487E+0 + b = 0.4589901487275583E-1 + v = 0.4691445539106986E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 2702: + + v = 0.2998675149888161E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.4077860529495355E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2065562538818703E-1 + v = 0.1185349192520667E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5250918173022379E-1 + v = 0.1913408643425751E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8993480082038376E-1 + v = 0.2452886577209897E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1306023924436019E+0 + v = 0.2862408183288702E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1732060388531418E+0 + v = 0.3178032258257357E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2168727084820249E+0 + v = 0.3422945667633690E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2609528309173586E+0 + v = 0.3612790520235922E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3049252927938952E+0 + v = 0.3758638229818521E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3483484138084404E+0 + v = 0.3868711798859953E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3908321549106406E+0 + v = 0.3949429933189938E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4320210071894814E+0 + v = 0.4006068107541156E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4715824795890053E+0 + v = 0.4043192149672723E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5091984794078453E+0 + v = 0.4064947495808078E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, 
b, v, leb_tmp) + a = 0.5445580145650803E+0 + v = 0.4075245619813152E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6072575796841768E+0 + v = 0.4076423540893566E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6339484505755803E+0 + v = 0.4074280862251555E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6570718257486958E+0 + v = 0.4074163756012244E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6762557330090709E+0 + v = 0.4077647795071246E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6911161696923790E+0 + v = 0.4084517552782530E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7012841911659961E+0 + v = 0.4092468459224052E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7064559272410020E+0 + v = 0.4097872687240906E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6123554989894765E-1 + v = 0.1738986811745028E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1533070348312393E+0 + v = 0.2659616045280191E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2563902605244206E+0 + v = 0.3240596008171533E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3629346991663361E+0 + v = 0.3621195964432943E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4683949968987538E+0 + v = 0.3868838330760539E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5694479240657952E+0 + v = 0.4018911532693111E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6634465430993955E+0 + v = 0.4089929432983252E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, 
v, leb_tmp) + a = 0.1033958573552305E+0 + b = 0.3034544009063584E-1 + v = 0.2279907527706409E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1473521412414395E+0 + b = 0.6618803044247135E-1 + v = 0.2715205490578897E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1924552158705967E+0 + b = 0.1054431128987715E+0 + v = 0.3057917896703976E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2381094362890328E+0 + b = 0.1468263551238858E+0 + v = 0.3326913052452555E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2838121707936760E+0 + b = 0.1894486108187886E+0 + v = 0.3537334711890037E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3291323133373415E+0 + b = 0.2326374238761579E+0 + v = 0.3700567500783129E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3736896978741460E+0 + b = 0.2758485808485768E+0 + v = 0.3825245372589122E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4171406040760013E+0 + b = 0.3186179331996921E+0 + v = 0.3918125171518296E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4591677985256915E+0 + b = 0.3605329796303794E+0 + v = 0.3984720419937579E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4994733831718418E+0 + b = 0.4012147253586509E+0 + v = 0.4029746003338211E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5377731830445096E+0 + b = 0.4403050025570692E+0 + v = 0.4057428632156627E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5737917830001331E+0 + b = 0.4774565904277483E+0 + v = 0.4071719274114857E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2027323586271389E+0 + b = 0.3544122504976147E-1 
+ v = 0.2990236950664119E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2516942375187273E+0 + b = 0.7418304388646328E-1 + v = 0.3262951734212878E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3000227995257181E+0 + b = 0.1150502745727186E+0 + v = 0.3482634608242413E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3474806691046342E+0 + b = 0.1571963371209364E+0 + v = 0.3656596681700892E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3938103180359209E+0 + b = 0.1999631877247100E+0 + v = 0.3791740467794218E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4387519590455703E+0 + b = 0.2428073457846535E+0 + v = 0.3894034450156905E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4820503960077787E+0 + b = 0.2852575132906155E+0 + v = 0.3968600245508371E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5234573778475101E+0 + b = 0.3268884208674639E+0 + v = 0.4019931351420050E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5627318647235282E+0 + b = 0.3673033321675939E+0 + v = 0.4052108801278599E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5996390607156954E+0 + b = 0.4061211551830290E+0 + v = 0.4068978613940934E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3084780753791947E+0 + b = 0.3860125523100059E-1 + v = 0.3454275351319704E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3589988275920223E+0 + b = 0.7928938987104867E-1 + v = 0.3629963537007920E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4078628415881973E+0 + b = 0.1212614643030087E+0 + v = 0.3770187233889873E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4549287258889735E+0 + b = 0.1638770827382693E+0 + v = 0.3878608613694378E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5000278512957279E+0 + b = 0.2065965798260176E+0 + v = 0.3959065270221274E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5429785044928199E+0 + b = 0.2489436378852235E+0 + v = 0.4015286975463570E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5835939850491711E+0 + b = 0.2904811368946891E+0 + v = 0.4050866785614717E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6216870353444856E+0 + b = 0.3307941957666609E+0 + v = 0.4069320185051913E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4151104662709091E+0 + b = 0.4064829146052554E-1 + v = 0.3760120964062763E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4649804275009218E+0 + b = 0.8258424547294755E-1 + v = 0.3870969564418064E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5124695757009662E+0 + b = 0.1251841962027289E+0 + v = 0.3955287790534055E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5574711100606224E+0 + b = 0.1679107505976331E+0 + v = 0.4015361911302668E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5998597333287227E+0 + b = 0.2102805057358715E+0 + v = 0.4053836986719548E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6395007148516600E+0 + b = 0.2518418087774107E+0 + v = 0.4073578673299117E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5188456224746252E+0 + b = 0.4194321676077518E-1 + v = 0.3954628379231406E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.5664190707942778E+0 + b = 0.8457661551921499E-1 + v = 0.4017645508847530E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6110464353283153E+0 + b = 0.1273652932519396E+0 + v = 0.4059030348651293E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6526430302051563E+0 + b = 0.1698173239076354E+0 + v = 0.4080565809484880E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6167551880377548E+0 + b = 0.4266398851548864E-1 + v = 0.4063018753664651E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6607195418355383E+0 + b = 0.8551925814238349E-1 + v = 0.4087191292799671E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 3074: + + v = 0.2599095953754734E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.3603134089687541E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.3586067974412447E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.1886108518723392E-1 + v = 0.9831528474385880E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4800217244625303E-1 + v = 0.1605023107954450E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8244922058397242E-1 + v = 0.2072200131464099E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1200408362484023E+0 + v = 0.2431297618814187E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1595773530809965E+0 + v = 0.2711819064496707E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2002635973434064E+0 + v = 0.2932762038321116E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2415127590139982E+0 + v = 0.3107032514197368E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2828584158458477E+0 + v = 0.3243808058921213E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3239091015338138E+0 + v = 0.3349899091374030E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3643225097962194E+0 + v = 0.3430580688505218E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4037897083691802E+0 + v = 0.3490124109290343E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4420247515194127E+0 + v = 0.3532148948561955E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4787572538464938E+0 + v = 0.3559862669062833E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5137265251275234E+0 + v = 0.3576224317551411E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5466764056654611E+0 + v = 0.3584050533086076E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6054859420813535E+0 + v = 0.3584903581373224E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6308106701764562E+0 + v = 0.3582991879040586E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6530369230179584E+0 + v = 0.3582371187963125E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6718609524611158E+0 + v = 0.3584353631122350E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6869676499894013E+0 + v = 0.3589120166517785E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6980467077240748E+0 + v = 0.3595445704531601E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7048241721250522E+0 + v = 0.3600943557111074E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5591105222058232E-1 + v = 0.1456447096742039E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1407384078513916E+0 + v = 0.2252370188283782E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2364035438976309E+0 + v = 0.2766135443474897E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3360602737818170E+0 + v = 0.3110729491500851E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4356292630054665E+0 + v = 0.3342506712303391E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5321569415256174E+0 + v = 0.3491981834026860E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6232956305040554E+0 + v = 0.3576003604348932E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9469870086838469E-1 + b = 0.2778748387309470E-1 + v = 0.1921921305788564E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1353170300568141E+0 + b = 0.6076569878628364E-1 + v = 0.2301458216495632E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1771679481726077E+0 + b = 0.9703072762711040E-1 + v = 0.2604248549522893E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2197066664231751E+0 + b = 0.1354112458524762E+0 + v = 0.2845275425870697E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2624783557374927E+0 + b = 0.1750996479744100E+0 + v = 0.3036870897974840E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3050969521214442E+0 + b = 0.2154896907449802E+0 + v = 0.3188414832298066E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3472252637196021E+0 + b = 
0.2560954625740152E+0 + v = 0.3307046414722089E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3885610219026360E+0 + b = 0.2965070050624096E+0 + v = 0.3398330969031360E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4288273776062765E+0 + b = 0.3363641488734497E+0 + v = 0.3466757899705373E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4677662471302948E+0 + b = 0.3753400029836788E+0 + v = 0.3516095923230054E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5051333589553359E+0 + b = 0.4131297522144286E+0 + v = 0.3549645184048486E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5406942145810492E+0 + b = 0.4494423776081795E+0 + v = 0.3570415969441392E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5742204122576457E+0 + b = 0.4839938958841502E+0 + v = 0.3581251798496118E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1865407027225188E+0 + b = 0.3259144851070796E-1 + v = 0.2543491329913348E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2321186453689432E+0 + b = 0.6835679505297343E-1 + v = 0.2786711051330776E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2773159142523882E+0 + b = 0.1062284864451989E+0 + v = 0.2985552361083679E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3219200192237254E+0 + b = 0.1454404409323047E+0 + v = 0.3145867929154039E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3657032593944029E+0 + b = 0.1854018282582510E+0 + v = 0.3273290662067609E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4084376778363622E+0 + b = 0.2256297412014750E+0 + v = 0.3372705511943501E-3 + leb_tmp, start 
= get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4499004945751427E+0 + b = 0.2657104425000896E+0 + v = 0.3448274437851510E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4898758141326335E+0 + b = 0.3052755487631557E+0 + v = 0.3503592783048583E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5281547442266309E+0 + b = 0.3439863920645423E+0 + v = 0.3541854792663162E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5645346989813992E+0 + b = 0.3815229456121914E+0 + v = 0.3565995517909428E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5988181252159848E+0 + b = 0.4175752420966734E+0 + v = 0.3578802078302898E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2850425424471603E+0 + b = 0.3562149509862536E-1 + v = 0.2958644592860982E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3324619433027876E+0 + b = 0.7330318886871096E-1 + v = 0.3119548129116835E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3785848333076282E+0 + b = 0.1123226296008472E+0 + v = 0.3250745225005984E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4232891028562115E+0 + b = 0.1521084193337708E+0 + v = 0.3355153415935208E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4664287050829722E+0 + b = 0.1921844459223610E+0 + v = 0.3435847568549328E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5078458493735726E+0 + b = 0.2321360989678303E+0 + v = 0.3495786831622488E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5473779816204180E+0 + b = 0.2715886486360520E+0 + v = 0.3537767805534621E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.5848617133811376E+0 + b = 0.3101924707571355E+0 + v = 0.3564459815421428E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6201348281584888E+0 + b = 0.3476121052890973E+0 + v = 0.3578464061225468E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3852191185387871E+0 + b = 0.3763224880035108E-1 + v = 0.3239748762836212E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4325025061073423E+0 + b = 0.7659581935637135E-1 + v = 0.3345491784174287E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4778486229734490E+0 + b = 0.1163381306083900E+0 + v = 0.3429126177301782E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5211663693009000E+0 + b = 0.1563890598752899E+0 + v = 0.3492420343097421E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5623469504853703E+0 + b = 0.1963320810149200E+0 + v = 0.3537399050235257E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6012718188659246E+0 + b = 0.2357847407258738E+0 + v = 0.3566209152659172E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6378179206390117E+0 + b = 0.2743846121244060E+0 + v = 0.3581084321919782E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4836936460214534E+0 + b = 0.3895902610739024E-1 + v = 0.3426522117591512E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5293792562683797E+0 + b = 0.7871246819312640E-1 + v = 0.3491848770121379E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5726281253100033E+0 + b = 0.1187963808202981E+0 + v = 0.3539318235231476E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6133658776169068E+0 + b = 0.1587914708061787E+0 + v = 
0.3570231438458694E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6515085491865307E+0 + b = 0.1983058575227646E+0 + v = 0.3586207335051714E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5778692716064976E+0 + b = 0.3977209689791542E-1 + v = 0.3541196205164025E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6207904288086192E+0 + b = 0.7990157592981152E-1 + v = 0.3574296911573953E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6608688171046802E+0 + b = 0.1199671308754309E+0 + v = 0.3591993279818963E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6656263089489130E+0 + b = 0.4015955957805969E-1 + v = 0.3595855034661997E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 3470: + + v = 0.2040382730826330E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.3178149703889544E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.1721420832906233E-1 + v = 0.8288115128076110E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4408875374981770E-1 + v = 0.1360883192522954E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7594680813878681E-1 + v = 0.1766854454542662E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1108335359204799E+0 + v = 0.2083153161230153E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1476517054388567E+0 + v = 0.2333279544657158E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1856731870860615E+0 + v = 0.2532809539930247E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2243634099428821E+0 + v = 0.2692472184211158E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2633006881662727E+0 + v = 0.2819949946811885E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3021340904916283E+0 + v = 0.2920953593973030E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3405594048030089E+0 + v = 0.2999889782948352E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3783044434007372E+0 + v = 0.3060292120496902E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4151194767407910E+0 + v = 0.3105109167522192E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4507705766443257E+0 + v = 0.3136902387550312E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4850346056573187E+0 + v = 0.3157984652454632E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5176950817792470E+0 + v = 0.3170516518425422E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5485384240820989E+0 + v = 0.3176568425633755E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6039117238943308E+0 + v = 0.3177198411207062E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6279956655573113E+0 + v = 0.3175519492394733E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6493636169568952E+0 + v = 0.3174654952634756E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6677644117704504E+0 + v = 0.3175676415467654E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6829368572115624E+0 + v = 0.3178923417835410E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6946195818184121E+0 + v = 0.3183788287531909E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7025711542057026E+0 + v = 0.3188755151918807E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7066004767140119E+0 + v = 0.3191916889313849E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5132537689946062E-1 + v = 0.1231779611744508E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1297994661331225E+0 + v = 0.1924661373839880E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2188852049401307E+0 + v = 0.2380881867403424E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3123174824903457E+0 + v = 0.2693100663037885E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4064037620738195E+0 + v = 0.2908673382834366E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4984958396944782E+0 + v = 0.3053914619381535E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5864975046021365E+0 + v = 0.3143916684147777E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6686711634580175E+0 + v = 0.3187042244055363E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.8715738780835950E-1 + b = 0.2557175233367578E-1 + v = 0.1635219535869790E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1248383123134007E+0 + b = 0.5604823383376681E-1 + v = 0.1968109917696070E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1638062693383378E+0 + b = 0.8968568601900765E-1 + v = 0.2236754342249974E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2035586203373176E+0 + b = 0.1254086651976279E+0 + v = 0.2453186687017181E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2436798975293774E+0 + b = 0.1624780150162012E+0 + v = 0.2627551791580541E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2838207507773806E+0 + b = 0.2003422342683208E+0 + v = 0.2767654860152220E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3236787502217692E+0 + b = 0.2385628026255263E+0 + v = 0.2879467027765895E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3629849554840691E+0 + b = 0.2767731148783578E+0 + v = 0.2967639918918702E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4014948081992087E+0 + b = 0.3146542308245309E+0 + v = 0.3035900684660351E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4389818379260225E+0 + b = 0.3519196415895088E+0 + v = 0.3087338237298308E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4752331143674377E+0 + b = 0.3883050984023654E+0 + v = 0.3124608838860167E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5100457318374018E+0 + b = 0.4235613423908649E+0 + v = 0.3150084294226743E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5432238388954868E+0 + b = 0.4574484717196220E+0 + v = 0.3165958398598402E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5745758685072442E+0 + b = 0.4897311639255524E+0 + v = 0.3174320440957372E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1723981437592809E+0 + b = 0.3010630597881105E-1 + v = 0.2182188909812599E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2149553257844597E+0 + b = 0.6326031554204694E-1 + v = 0.2399727933921445E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.2573256081247422E+0 + b = 0.9848566980258631E-1 + v = 0.2579796133514652E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2993163751238106E+0 + b = 0.1350835952384266E+0 + v = 0.2727114052623535E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3407238005148000E+0 + b = 0.1725184055442181E+0 + v = 0.2846327656281355E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3813454978483264E+0 + b = 0.2103559279730725E+0 + v = 0.2941491102051334E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4209848104423343E+0 + b = 0.2482278774554860E+0 + v = 0.3016049492136107E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4594519699996300E+0 + b = 0.2858099509982883E+0 + v = 0.3072949726175648E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4965640166185930E+0 + b = 0.3228075659915428E+0 + v = 0.3114768142886460E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5321441655571562E+0 + b = 0.3589459907204151E+0 + v = 0.3143823673666223E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5660208438582166E+0 + b = 0.3939630088864310E+0 + v = 0.3162269764661535E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5980264315964364E+0 + b = 0.4276029922949089E+0 + v = 0.3172164663759821E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2644215852350733E+0 + b = 0.3300939429072552E-1 + v = 0.2554575398967435E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3090113743443063E+0 + b = 0.6803887650078501E-1 + v = 0.2701704069135677E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3525871079197808E+0 + b = 0.1044326136206709E+0 + v = 
0.2823693413468940E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3950418005354029E+0 + b = 0.1416751597517679E+0 + v = 0.2922898463214289E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4362475663430163E+0 + b = 0.1793408610504821E+0 + v = 0.3001829062162428E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4760661812145854E+0 + b = 0.2170630750175722E+0 + v = 0.3062890864542953E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5143551042512103E+0 + b = 0.2545145157815807E+0 + v = 0.3108328279264746E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5509709026935597E+0 + b = 0.2913940101706601E+0 + v = 0.3140243146201245E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5857711030329428E+0 + b = 0.3274169910910705E+0 + v = 0.3160638030977130E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6186149917404392E+0 + b = 0.3623081329317265E+0 + v = 0.3171462882206275E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3586894569557064E+0 + b = 0.3497354386450040E-1 + v = 0.2812388416031796E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4035266610019441E+0 + b = 0.7129736739757095E-1 + v = 0.2912137500288045E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4467775312332510E+0 + b = 0.1084758620193165E+0 + v = 0.2993241256502206E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4883638346608543E+0 + b = 0.1460915689241772E+0 + v = 0.3057101738983822E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5281908348434601E+0 + b = 0.1837790832369980E+0 + v = 0.3105319326251432E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5661542687149311E+0 + b = 0.2212075390874021E+0 + v = 0.3139565514428167E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6021450102031452E+0 + b = 0.2580682841160985E+0 + v = 0.3161543006806366E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6360520783610050E+0 + b = 0.2940656362094121E+0 + v = 0.3172985960613294E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4521611065087196E+0 + b = 0.3631055365867002E-1 + v = 0.2989400336901431E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4959365651560963E+0 + b = 0.7348318468484350E-1 + v = 0.3054555883947677E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5376815804038283E+0 + b = 0.1111087643812648E+0 + v = 0.3104764960807702E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5773314480243768E+0 + b = 0.1488226085145408E+0 + v = 0.3141015825977616E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6148113245575056E+0 + b = 0.1862892274135151E+0 + v = 0.3164520621159896E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6500407462842380E+0 + b = 0.2231909701714456E+0 + v = 0.3176652305912204E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5425151448707213E+0 + b = 0.3718201306118944E-1 + v = 0.3105097161023939E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5841860556907931E+0 + b = 0.7483616335067346E-1 + v = 0.3143014117890550E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6234632186851500E+0 + b = 0.1125990834266120E+0 + v = 0.3168172866287200E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.6602934551848843E+0 + b = 0.1501303813157619E+0 + v = 0.3181401865570968E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6278573968375105E+0 + b = 0.3767559930245720E-1 + v = 0.3170663659156037E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6665611711264577E+0 + b = 0.7548443301360158E-1 + v = 0.3185447944625510E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 3890: + + v = 0.1807395252196920E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.2848008782238827E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.2836065837530581E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.1587876419858352E-1 + v = 0.7013149266673816E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4069193593751206E-1 + v = 0.1162798021956766E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7025888115257997E-1 + v = 0.1518728583972105E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1027495450028704E+0 + v = 0.1798796108216934E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1371457730893426E+0 + v = 0.2022593385972785E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1727758532671953E+0 + v = 0.2203093105575464E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2091492038929037E+0 + v = 0.2349294234299855E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2458813281751915E+0 + v = 0.2467682058747003E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2826545859450066E+0 + v = 0.2563092683572224E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, 
leb_tmp) + a = 0.3191957291799622E+0 + v = 0.2639253896763318E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3552621469299578E+0 + v = 0.2699137479265108E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3906329503406230E+0 + v = 0.2745196420166739E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4251028614093031E+0 + v = 0.2779529197397593E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4584777520111870E+0 + v = 0.2803996086684265E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4905711358710193E+0 + v = 0.2820302356715842E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5212011669847385E+0 + v = 0.2830056747491068E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5501878488737995E+0 + v = 0.2834808950776839E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6025037877479342E+0 + v = 0.2835282339078929E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6254572689549016E+0 + v = 0.2833819267065800E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6460107179528248E+0 + v = 0.2832858336906784E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6639541138154251E+0 + v = 0.2833268235451244E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6790688515667495E+0 + v = 0.2835432677029253E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6911338580371512E+0 + v = 0.2839091722743049E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6999385956126490E+0 + v = 0.2843308178875841E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, 
leb_tmp) + a = 0.7053037748656896E+0 + v = 0.2846703550533846E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4732224387180115E-1 + v = 0.1051193406971900E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1202100529326803E+0 + v = 0.1657871838796974E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2034304820664855E+0 + v = 0.2064648113714232E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2912285643573002E+0 + v = 0.2347942745819741E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3802361792726768E+0 + v = 0.2547775326597726E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4680598511056146E+0 + v = 0.2686876684847025E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5528151052155599E+0 + v = 0.2778665755515867E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6329386307803041E+0 + v = 0.2830996616782929E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.8056516651369069E-1 + b = 0.2363454684003124E-1 + v = 0.1403063340168372E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1156476077139389E+0 + b = 0.5191291632545936E-1 + v = 0.1696504125939477E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1520473382760421E+0 + b = 0.8322715736994519E-1 + v = 0.1935787242745390E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1892986699745931E+0 + b = 0.1165855667993712E+0 + v = 0.2130614510521968E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2270194446777792E+0 + b = 0.1513077167409504E+0 + v = 0.2289381265931048E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, 
v, leb_tmp) + a = 0.2648908185093273E+0 + b = 0.1868882025807859E+0 + v = 0.2418630292816186E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3026389259574136E+0 + b = 0.2229277629776224E+0 + v = 0.2523400495631193E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3400220296151384E+0 + b = 0.2590951840746235E+0 + v = 0.2607623973449605E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3768217953335510E+0 + b = 0.2951047291750847E+0 + v = 0.2674441032689209E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4128372900921884E+0 + b = 0.3307019714169930E+0 + v = 0.2726432360343356E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4478807131815630E+0 + b = 0.3656544101087634E+0 + v = 0.2765787685924545E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4817742034089257E+0 + b = 0.3997448951939695E+0 + v = 0.2794428690642224E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5143472814653344E+0 + b = 0.4327667110812024E+0 + v = 0.2814099002062895E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5454346213905650E+0 + b = 0.4645196123532293E+0 + v = 0.2826429531578994E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5748739313170252E+0 + b = 0.4948063555703345E+0 + v = 0.2832983542550884E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1599598738286342E+0 + b = 0.2792357590048985E-1 + v = 0.1886695565284976E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1998097412500951E+0 + b = 0.5877141038139065E-1 + v = 0.2081867882748234E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2396228952566202E+0 + b = 0.9164573914691377E-1 
+ v = 0.2245148680600796E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2792228341097746E+0 + b = 0.1259049641962687E+0 + v = 0.2380370491511872E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3184251107546741E+0 + b = 0.1610594823400863E+0 + v = 0.2491398041852455E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3570481164426244E+0 + b = 0.1967151653460898E+0 + v = 0.2581632405881230E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3949164710492144E+0 + b = 0.2325404606175168E+0 + v = 0.2653965506227417E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4318617293970503E+0 + b = 0.2682461141151439E+0 + v = 0.2710857216747087E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4677221009931678E+0 + b = 0.3035720116011973E+0 + v = 0.2754434093903659E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5023417939270955E+0 + b = 0.3382781859197439E+0 + v = 0.2786579932519380E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5355701836636128E+0 + b = 0.3721383065625942E+0 + v = 0.2809011080679474E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5672608451328771E+0 + b = 0.4049346360466055E+0 + v = 0.2823336184560987E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5972704202540162E+0 + b = 0.4364538098633802E+0 + v = 0.2831101175806309E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2461687022333596E+0 + b = 0.3070423166833368E-1 + v = 0.2221679970354546E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2881774566286831E+0 + b = 0.6338034669281885E-1 + v = 0.2356185734270703E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3293963604116978E+0 + b = 0.9742862487067941E-1 + v = 0.2469228344805590E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3697303822241377E+0 + b = 0.1323799532282290E+0 + v = 0.2562726348642046E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4090663023135127E+0 + b = 0.1678497018129336E+0 + v = 0.2638756726753028E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4472819355411712E+0 + b = 0.2035095105326114E+0 + v = 0.2699311157390862E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4842513377231437E+0 + b = 0.2390692566672091E+0 + v = 0.2746233268403837E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5198477629962928E+0 + b = 0.2742649818076149E+0 + v = 0.2781225674454771E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5539453011883145E+0 + b = 0.3088503806580094E+0 + v = 0.2805881254045684E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5864196762401251E+0 + b = 0.3425904245906614E+0 + v = 0.2821719877004913E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6171484466668390E+0 + b = 0.3752562294789468E+0 + v = 0.2830222502333124E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3350337830565727E+0 + b = 0.3261589934634747E-1 + v = 0.2457995956744870E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3775773224758284E+0 + b = 0.6658438928081572E-1 + v = 0.2551474407503706E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4188155229848973E+0 + b = 0.1014565797157954E+0 + v = 0.2629065335195311E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.4586805892009344E+0 + b = 0.1368573320843822E+0 + v = 0.2691900449925075E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4970895714224235E+0 + b = 0.1724614851951608E+0 + v = 0.2741275485754276E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5339505133960747E+0 + b = 0.2079779381416412E+0 + v = 0.2778530970122595E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5691665792531440E+0 + b = 0.2431385788322288E+0 + v = 0.2805010567646741E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6026387682680377E+0 + b = 0.2776901883049853E+0 + v = 0.2822055834031040E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6342676150163307E+0 + b = 0.3113881356386632E+0 + v = 0.2831016901243473E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4237951119537067E+0 + b = 0.3394877848664351E-1 + v = 0.2624474901131803E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4656918683234929E+0 + b = 0.6880219556291447E-1 + v = 0.2688034163039377E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5058857069185980E+0 + b = 0.1041946859721635E+0 + v = 0.2738932751287636E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5443204666713996E+0 + b = 0.1398039738736393E+0 + v = 0.2777944791242523E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5809298813759742E+0 + b = 0.1753373381196155E+0 + v = 0.2806011661660987E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6156416039447128E+0 + b = 0.2105215793514010E+0 + v = 0.2824181456597460E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6483801351066604E+0 + b = 0.2450953312157051E+0 + v = 
0.2833585216577828E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5103616577251688E+0 + b = 0.3485560643800719E-1 + v = 0.2738165236962878E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5506738792580681E+0 + b = 0.7026308631512033E-1 + v = 0.2778365208203180E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5889573040995292E+0 + b = 0.1059035061296403E+0 + v = 0.2807852940418966E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6251641589516930E+0 + b = 0.1414823925236026E+0 + v = 0.2827245949674705E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6592414921570178E+0 + b = 0.1767207908214530E+0 + v = 0.2837342344829828E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5930314017533384E+0 + b = 0.3542189339561672E-1 + v = 0.2809233907610981E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6309812253390175E+0 + b = 0.7109574040369549E-1 + v = 0.2829930809742694E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6666296011353230E+0 + b = 0.1067259792282730E+0 + v = 0.2841097874111479E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6703715271049922E+0 + b = 0.3569455268820809E-1 + v = 0.2843455206008783E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 4334: + + v = 0.1449063022537883E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.2546377329828424E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.1462896151831013E-1 + v = 0.6018432961087496E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3769840812493139E-1 + v = 0.1002286583263673E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6524701904096891E-1 + v = 0.1315222931028093E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.9560543416134648E-1 + v = 0.1564213746876724E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1278335898929198E+0 + v = 0.1765118841507736E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1613096104466031E+0 + v = 0.1928737099311080E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1955806225745371E+0 + v = 0.2062658534263270E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2302935218498028E+0 + v = 0.2172395445953787E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2651584344113027E+0 + v = 0.2262076188876047E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2999276825183209E+0 + v = 0.2334885699462397E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3343828669718798E+0 + v = 0.2393355273179203E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3683265013750518E+0 + v = 0.2439559200468863E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4015763206518108E+0 + v = 0.2475251866060002E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4339612026399770E+0 + v = 0.2501965558158773E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4653180651114582E+0 + v = 0.2521081407925925E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4954893331080803E+0 + v = 0.2533881002388081E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5243207068924930E+0 + v = 0.2541582900848261E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5516590479041704E+0 + v = 0.2545365737525860E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6012371927804176E+0 + v = 0.2545726993066799E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6231574466449819E+0 + v = 0.2544456197465555E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6429416514181271E+0 + v = 0.2543481596881064E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6604124272943595E+0 + v = 0.2543506451429194E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6753851470408250E+0 + v = 0.2544905675493763E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6876717970626160E+0 + v = 0.2547611407344429E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6970895061319234E+0 + v = 0.2551060375448869E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7034746912553310E+0 + v = 0.2554291933816039E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7067017217542295E+0 + v = 0.2556255710686343E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4382223501131123E-1 + v = 0.9041339695118195E-4 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1117474077400006E+0 + v = 0.1438426330079022E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1897153252911440E+0 + v = 0.1802523089820518E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2724023009910331E+0 + v = 0.2060052290565496E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3567163308709902E+0 + v = 0.2245002248967466E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4404784483028087E+0 + v = 0.2377059847731150E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5219833154161411E+0 + v = 0.2468118955882525E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5998179868977553E+0 + v = 0.2525410872966528E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6727803154548222E+0 + v = 0.2553101409933397E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.7476563943166086E-1 + b = 0.2193168509461185E-1 + v = 0.1212879733668632E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1075341482001416E+0 + b = 0.4826419281533887E-1 + v = 0.1472872881270931E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1416344885203259E+0 + b = 0.7751191883575742E-1 + v = 0.1686846601010828E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1766325315388586E+0 + b = 0.1087558139247680E+0 + v = 0.1862698414660208E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2121744174481514E+0 + b = 0.1413661374253096E+0 + v = 0.2007430956991861E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2479669443408145E+0 + b = 0.1748768214258880E+0 + v = 0.2126568125394796E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2837600452294113E+0 + b = 0.2089216406612073E+0 + v = 0.2224394603372113E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3193344933193984E+0 + b = 0.2431987685545972E+0 + v = 0.2304264522673135E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3544935442438745E+0 + b = 0.2774497054377770E+0 + v = 0.2368854288424087E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3890571932288154E+0 + b = 0.3114460356156915E+0 + v = 0.2420352089461772E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4228581214259090E+0 + b = 0.3449806851913012E+0 + v = 0.2460597113081295E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4557387211304052E+0 + b = 0.3778618641248256E+0 + v = 0.2491181912257687E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4875487950541643E+0 + b = 0.4099086391698978E+0 + v = 0.2513528194205857E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5181436529962997E+0 + b = 0.4409474925853973E+0 + v = 0.2528943096693220E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5473824095600661E+0 + b = 0.4708094517711291E+0 + v = 0.2538660368488136E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5751263398976174E+0 + b = 0.4993275140354637E+0 + v = 0.2543868648299022E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1489515746840028E+0 + b = 0.2599381993267017E-1 + v = 0.1642595537825183E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1863656444351767E+0 + b = 0.5479286532462190E-1 + v = 0.1818246659849308E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2238602880356348E+0 + b = 0.8556763251425254E-1 + v = 0.1966565649492420E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2612723375728160E+0 + b = 0.1177257802267011E+0 + v = 0.2090677905657991E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2984332990206190E+0 + b = 0.1508168456192700E+0 + v = 0.2193820409510504E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.3351786584663333E+0 + b = 0.1844801892177727E+0 + v = 0.2278870827661928E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3713505522209120E+0 + b = 0.2184145236087598E+0 + v = 0.2348283192282090E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4067981098954663E+0 + b = 0.2523590641486229E+0 + v = 0.2404139755581477E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4413769993687534E+0 + b = 0.2860812976901373E+0 + v = 0.2448227407760734E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4749487182516394E+0 + b = 0.3193686757808996E+0 + v = 0.2482110455592573E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5073798105075426E+0 + b = 0.3520226949547602E+0 + v = 0.2507192397774103E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5385410448878654E+0 + b = 0.3838544395667890E+0 + v = 0.2524765968534880E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5683065353670530E+0 + b = 0.4146810037640963E+0 + v = 0.2536052388539425E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5965527620663510E+0 + b = 0.4443224094681121E+0 + v = 0.2542230588033068E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2299227700856157E+0 + b = 0.2865757664057584E-1 + v = 0.1944817013047896E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2695752998553267E+0 + b = 0.5923421684485993E-1 + v = 0.2067862362746635E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3086178716611389E+0 + b = 0.9117817776057715E-1 + v = 0.2172440734649114E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3469649871659077E+0 + b = 0.1240593814082605E+0 + v = 
0.2260125991723423E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3845153566319655E+0 + b = 0.1575272058259175E+0 + v = 0.2332655008689523E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4211600033403215E+0 + b = 0.1912845163525413E+0 + v = 0.2391699681532458E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4567867834329882E+0 + b = 0.2250710177858171E+0 + v = 0.2438801528273928E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4912829319232061E+0 + b = 0.2586521303440910E+0 + v = 0.2475370504260665E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5245364793303812E+0 + b = 0.2918112242865407E+0 + v = 0.2502707235640574E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5564369788915756E+0 + b = 0.3243439239067890E+0 + v = 0.2522031701054241E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5868757697775287E+0 + b = 0.3560536787835351E+0 + v = 0.2534511269978784E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6157458853519617E+0 + b = 0.3867480821242581E+0 + v = 0.2541284914955151E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3138461110672113E+0 + b = 0.3051374637507278E-1 + v = 0.2161509250688394E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3542495872050569E+0 + b = 0.6237111233730755E-1 + v = 0.2248778513437852E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3935751553120181E+0 + b = 0.9516223952401907E-1 + v = 0.2322388803404617E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4317634668111147E+0 + b = 0.1285467341508517E+0 + v = 0.2383265471001355E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4687413842250821E+0 + b = 0.1622318931656033E+0 + v = 0.2432476675019525E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5044274237060283E+0 + b = 0.1959581153836453E+0 + v = 0.2471122223750674E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5387354077925727E+0 + b = 0.2294888081183837E+0 + v = 0.2500291752486870E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5715768898356105E+0 + b = 0.2626031152713945E+0 + v = 0.2521055942764682E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6028627200136111E+0 + b = 0.2950904075286713E+0 + v = 0.2534472785575503E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6325039812653463E+0 + b = 0.3267458451113286E+0 + v = 0.2541599713080121E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3981986708423407E+0 + b = 0.3183291458749821E-1 + v = 0.2317380975862936E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4382791182133300E+0 + b = 0.6459548193880908E-1 + v = 0.2378550733719775E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4769233057218166E+0 + b = 0.9795757037087952E-1 + v = 0.2428884456739118E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5140823911194238E+0 + b = 0.1316307235126655E+0 + v = 0.2469002655757292E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5496977833862983E+0 + b = 0.1653556486358704E+0 + v = 0.2499657574265851E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5837047306512727E+0 + b = 0.1988931724126510E+0 + v = 0.2521676168486082E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.6160349566926879E+0 + b = 0.2320174581438950E+0 + v = 0.2535935662645334E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6466185353209440E+0 + b = 0.2645106562168662E+0 + v = 0.2543356743363214E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4810835158795404E+0 + b = 0.3275917807743992E-1 + v = 0.2427353285201535E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5199925041324341E+0 + b = 0.6612546183967181E-1 + v = 0.2468258039744386E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5571717692207494E+0 + b = 0.9981498331474143E-1 + v = 0.2500060956440310E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5925789250836378E+0 + b = 0.1335687001410374E+0 + v = 0.2523238365420979E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6261658523859670E+0 + b = 0.1671444402896463E+0 + v = 0.2538399260252846E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6578811126669331E+0 + b = 0.2003106382156076E+0 + v = 0.2546255927268069E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5609624612998100E+0 + b = 0.3337500940231335E-1 + v = 0.2500583360048449E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5979959659984670E+0 + b = 0.6708750335901803E-1 + v = 0.2524777638260203E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6330523711054002E+0 + b = 0.1008792126424850E+0 + v = 0.2540951193860656E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6660960998103972E+0 + b = 0.1345050343171794E+0 + v = 0.2549524085027472E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6365384364585819E+0 + b = 0.3372799460737052E-1 + v = 
0.2542569507009158E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6710994302899275E+0 + b = 0.6755249309678028E-1 + v = 0.2552114127580376E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 4802: + + v = 0.9687521879420705E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.2307897895367918E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.2297310852498558E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2335728608887064E-1 + v = 0.7386265944001919E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4352987836550653E-1 + v = 0.8257977698542210E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6439200521088801E-1 + v = 0.9706044762057630E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.9003943631993181E-1 + v = 0.1302393847117003E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1196706615548473E+0 + v = 0.1541957004600968E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1511715412838134E+0 + v = 0.1704459770092199E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1835982828503801E+0 + v = 0.1827374890942906E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2165081259155405E+0 + v = 0.1926360817436107E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2496208720417563E+0 + v = 0.2008010239494833E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2827200673567900E+0 + v = 0.2075635983209175E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3156190823994346E+0 + v = 0.2131306638690909E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3481476793749115E+0 + v = 0.2176562329937335E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3801466086947226E+0 + v = 0.2212682262991018E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4114652119634011E+0 + v = 0.2240799515668565E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4419598786519751E+0 + v = 0.2261959816187525E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4714925949329543E+0 + v = 0.2277156368808855E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4999293972879466E+0 + v = 0.2287351772128336E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5271387221431248E+0 + v = 0.2293490814084085E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5529896780837761E+0 + v = 0.2296505312376273E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6000856099481712E+0 + v = 0.2296793832318756E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6210562192785175E+0 + v = 0.2295785443842974E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6401165879934240E+0 + v = 0.2295017931529102E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6571144029244334E+0 + v = 0.2295059638184868E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6718910821718863E+0 + v = 0.2296232343237362E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6842845591099010E+0 + v = 0.2298530178740771E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6941353476269816E+0 + v = 0.2301579790280501E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7012965242212991E+0 + v = 0.2304690404996513E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7056471428242644E+0 + v = 0.2307027995907102E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4595557643585895E-1 + v = 0.9312274696671092E-4 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1049316742435023E+0 + v = 0.1199919385876926E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1773548879549274E+0 + v = 0.1598039138877690E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2559071411236127E+0 + v = 0.1822253763574900E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3358156837985898E+0 + v = 0.1988579593655040E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4155835743763893E+0 + v = 0.2112620102533307E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4937894296167472E+0 + v = 0.2201594887699007E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5691569694793316E+0 + v = 0.2261622590895036E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6405840854894251E+0 + v = 0.2296458453435705E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.7345133894143348E-1 + b = 0.2177844081486067E-1 + v = 0.1006006990267000E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1009859834044931E+0 + b = 0.4590362185775188E-1 + v = 0.1227676689635876E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1324289619748758E+0 + b = 0.7255063095690877E-1 + v = 0.1467864280270117E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, 
leb_tmp) + a = 0.1654272109607127E+0 + b = 0.1017825451960684E+0 + v = 0.1644178912101232E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1990767186776461E+0 + b = 0.1325652320980364E+0 + v = 0.1777664890718961E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2330125945523278E+0 + b = 0.1642765374496765E+0 + v = 0.1884825664516690E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2670080611108287E+0 + b = 0.1965360374337889E+0 + v = 0.1973269246453848E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3008753376294316E+0 + b = 0.2290726770542238E+0 + v = 0.2046767775855328E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3344475596167860E+0 + b = 0.2616645495370823E+0 + v = 0.2107600125918040E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3675709724070786E+0 + b = 0.2941150728843141E+0 + v = 0.2157416362266829E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4001000887587812E+0 + b = 0.3262440400919066E+0 + v = 0.2197557816920721E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4318956350436028E+0 + b = 0.3578835350611916E+0 + v = 0.2229192611835437E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4628239056795531E+0 + b = 0.3888751854043678E+0 + v = 0.2253385110212775E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4927563229773636E+0 + b = 0.4190678003222840E+0 + v = 0.2271137107548774E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5215687136707969E+0 + b = 0.4483151836883852E+0 + v = 0.2283414092917525E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5491402346984905E+0 + b = 0.4764740676087880E+0 + 
v = 0.2291161673130077E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5753520160126075E+0 + b = 0.5034021310998277E+0 + v = 0.2295313908576598E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1388326356417754E+0 + b = 0.2435436510372806E-1 + v = 0.1438204721359031E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1743686900537244E+0 + b = 0.5118897057342652E-1 + v = 0.1607738025495257E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2099737037950268E+0 + b = 0.8014695048539634E-1 + v = 0.1741483853528379E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2454492590908548E+0 + b = 0.1105117874155699E+0 + v = 0.1851918467519151E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2807219257864278E+0 + b = 0.1417950531570966E+0 + v = 0.1944628638070613E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3156842271975842E+0 + b = 0.1736604945719597E+0 + v = 0.2022495446275152E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3502090945177752E+0 + b = 0.2058466324693981E+0 + v = 0.2087462382438514E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3841684849519686E+0 + b = 0.2381284261195919E+0 + v = 0.2141074754818308E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4174372367906016E+0 + b = 0.2703031270422569E+0 + v = 0.2184640913748162E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4498926465011892E+0 + b = 0.3021845683091309E+0 + v = 0.2219309165220329E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4814146229807701E+0 + b = 0.3335993355165720E+0 + v = 0.2246123118340624E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5118863625734701E+0 + b = 0.3643833735518232E+0 + v = 0.2266062766915125E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5411947455119144E+0 + b = 0.3943789541958179E+0 + v = 0.2280072952230796E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5692301500357246E+0 + b = 0.4234320144403542E+0 + v = 0.2289082025202583E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5958857204139576E+0 + b = 0.4513897947419260E+0 + v = 0.2294012695120025E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2156270284785766E+0 + b = 0.2681225755444491E-1 + v = 0.1722434488736947E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2532385054909710E+0 + b = 0.5557495747805614E-1 + v = 0.1830237421455091E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2902564617771537E+0 + b = 0.8569368062950249E-1 + v = 0.1923855349997633E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3266979823143256E+0 + b = 0.1167367450324135E+0 + v = 0.2004067861936271E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3625039627493614E+0 + b = 0.1483861994003304E+0 + v = 0.2071817297354263E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3975838937548699E+0 + b = 0.1803821503011405E+0 + v = 0.2128250834102103E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4318396099009774E+0 + b = 0.2124962965666424E+0 + v = 0.2174513719440102E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4651706555732742E+0 + b = 0.2445221837805913E+0 + v = 0.2211661839150214E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.4974752649620969E+0 + b = 0.2762701224322987E+0 + v = 0.2240665257813102E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5286517579627517E+0 + b = 0.3075627775211328E+0 + v = 0.2262439516632620E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5586001195731895E+0 + b = 0.3382311089826877E+0 + v = 0.2277874557231869E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5872229902021319E+0 + b = 0.3681108834741399E+0 + v = 0.2287854314454994E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6144258616235123E+0 + b = 0.3970397446872839E+0 + v = 0.2293268499615575E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2951676508064861E+0 + b = 0.2867499538750441E-1 + v = 0.1912628201529828E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3335085485472725E+0 + b = 0.5867879341903510E-1 + v = 0.1992499672238701E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3709561760636381E+0 + b = 0.8961099205022284E-1 + v = 0.2061275533454027E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4074722861667498E+0 + b = 0.1211627927626297E+0 + v = 0.2119318215968572E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4429923648839117E+0 + b = 0.1530748903554898E+0 + v = 0.2167416581882652E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4774428052721736E+0 + b = 0.1851176436721877E+0 + v = 0.2206430730516600E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5107446539535904E+0 + b = 0.2170829107658179E+0 + v = 0.2237186938699523E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5428151370542935E+0 + b = 0.2487786689026271E+0 + v = 
0.2260480075032884E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5735699292556964E+0 + b = 0.2800239952795016E+0 + v = 0.2277098884558542E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6029253794562866E+0 + b = 0.3106445702878119E+0 + v = 0.2287845715109671E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6307998987073145E+0 + b = 0.3404689500841194E+0 + v = 0.2293547268236294E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3752652273692719E+0 + b = 0.2997145098184479E-1 + v = 0.2056073839852528E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4135383879344028E+0 + b = 0.6086725898678011E-1 + v = 0.2114235865831876E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4506113885153907E+0 + b = 0.9238849548435643E-1 + v = 0.2163175629770551E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4864401554606072E+0 + b = 0.1242786603851851E+0 + v = 0.2203392158111650E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5209708076611709E+0 + b = 0.1563086731483386E+0 + v = 0.2235473176847839E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5541422135830122E+0 + b = 0.1882696509388506E+0 + v = 0.2260024141501235E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5858880915113817E+0 + b = 0.2199672979126059E+0 + v = 0.2277675929329182E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6161399390603444E+0 + b = 0.2512165482924867E+0 + v = 0.2289102112284834E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6448296482255090E+0 + b = 0.2818368701871888E+0 + v = 0.2295027954625118E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4544796274917948E+0 + b = 0.3088970405060312E-1 + v = 0.2161281589879992E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4919389072146628E+0 + b = 0.6240947677636835E-1 + v = 0.2201980477395102E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5279313026985183E+0 + b = 0.9430706144280313E-1 + v = 0.2234952066593166E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5624169925571135E+0 + b = 0.1263547818770374E+0 + v = 0.2260540098520838E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5953484627093287E+0 + b = 0.1583430788822594E+0 + v = 0.2279157981899988E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6266730715339185E+0 + b = 0.1900748462555988E+0 + v = 0.2291296918565571E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6563363204278871E+0 + b = 0.2213599519592567E+0 + v = 0.2297533752536649E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5314574716585696E+0 + b = 0.3152508811515374E-1 + v = 0.2234927356465995E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5674614932298185E+0 + b = 0.6343865291465561E-1 + v = 0.2261288012985219E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6017706004970264E+0 + b = 0.9551503504223951E-1 + v = 0.2280818160923688E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6343471270264178E+0 + b = 0.1275440099801196E+0 + v = 0.2293773295180159E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6651494599127802E+0 + b = 0.1593252037671960E+0 + v = 0.2300528767338634E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.6050184986005704E+0 + b = 0.3192538338496105E-1 + v = 0.2281893855065666E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6390163550880400E+0 + b = 0.6402824353962306E-1 + v = 0.2295720444840727E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6711199107088448E+0 + b = 0.9609805077002909E-1 + v = 0.2303227649026753E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6741354429572275E+0 + b = 0.3211853196273233E-1 + v = 0.2304831913227114E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 5294: + + v = 0.9080510764308163E-4 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.2084824361987793E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.2303261686261450E-1 + v = 0.5011105657239616E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3757208620162394E-1 + v = 0.5942520409683854E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5821912033821852E-1 + v = 0.9564394826109721E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.8403127529194872E-1 + v = 0.1185530657126338E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1122927798060578E+0 + v = 0.1364510114230331E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1420125319192987E+0 + v = 0.1505828825605415E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1726396437341978E+0 + v = 0.1619298749867023E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2038170058115696E+0 + v = 0.1712450504267789E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2352849892876508E+0 + v = 0.1789891098164999E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2668363354312461E+0 + v = 0.1854474955629795E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2982941279900452E+0 + v = 0.1908148636673661E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3295002922087076E+0 + v = 0.1952377405281833E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3603094918363593E+0 + v = 0.1988349254282232E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3905857895173920E+0 + v = 0.2017079807160050E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4202005758160837E+0 + v = 0.2039473082709094E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4490310061597227E+0 + v = 0.2056360279288953E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4769586160311491E+0 + v = 0.2068525823066865E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5038679887049750E+0 + v = 0.2076724877534488E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5296454286519961E+0 + v = 0.2081694278237885E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5541776207164850E+0 + v = 0.2084157631219326E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5990467321921213E+0 + v = 0.2084381531128593E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6191467096294587E+0 + v = 0.2083476277129307E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6375251212901849E+0 + v = 0.2082686194459732E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6540514381131168E+0 + v = 0.2082475686112415E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6685899064391510E+0 + v = 0.2083139860289915E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6810013009681648E+0 + v = 0.2084745561831237E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6911469578730340E+0 + v = 0.2087091313375890E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6988956915141736E+0 + v = 0.2089718413297697E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7041335794868720E+0 + v = 0.2092003303479793E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7067754398018567E+0 + v = 0.2093336148263241E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3840368707853623E-1 + v = 0.7591708117365267E-4 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9835485954117399E-1 + v = 0.1083383968169186E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1665774947612998E+0 + v = 0.1403019395292510E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2405702335362910E+0 + v = 0.1615970179286436E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3165270770189046E+0 + v = 0.1771144187504911E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3927386145645443E+0 + v = 0.1887760022988168E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.4678825918374656E+0 + v = 0.1973474670768214E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5408022024266935E+0 + v = 0.2033787661234659E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6104967445752438E+0 + v = 0.2072343626517331E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6760910702685738E+0 + v = 0.2091177834226918E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6655644120217392E-1 + b = 0.1936508874588424E-1 + v = 0.9316684484675566E-4 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.9446246161270182E-1 + b = 0.4252442002115869E-1 + v = 0.1116193688682976E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1242651925452509E+0 + b = 0.6806529315354374E-1 + v = 0.1298623551559414E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1553438064846751E+0 + b = 0.9560957491205369E-1 + v = 0.1450236832456426E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1871137110542670E+0 + b = 0.1245931657452888E+0 + v = 0.1572719958149914E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2192612628836257E+0 + b = 0.1545385828778978E+0 + v = 0.1673234785867195E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2515682807206955E+0 + b = 0.1851004249723368E+0 + v = 0.1756860118725188E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2838535866287290E+0 + b = 0.2160182608272384E+0 + v = 0.1826776290439367E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3159578817528521E+0 + b = 0.2470799012277111E+0 + v = 0.1885116347992865E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3477370882791392E+0 + b = 0.2781014208986402E+0 + v = 0.1933457860170574E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3790576960890540E+0 + b = 0.3089172523515731E+0 + v = 0.1973060671902064E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4097938317810200E+0 + 
b = 0.3393750055472244E+0 + v = 0.2004987099616311E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4398256572859637E+0 + b = 0.3693322470987730E+0 + v = 0.2030170909281499E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4690384114718480E+0 + b = 0.3986541005609877E+0 + v = 0.2049461460119080E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4973216048301053E+0 + b = 0.4272112491408562E+0 + v = 0.2063653565200186E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5245681526132446E+0 + b = 0.4548781735309936E+0 + v = 0.2073507927381027E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5506733911803888E+0 + b = 0.4815315355023251E+0 + v = 0.2079764593256122E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5755339829522475E+0 + b = 0.5070486445801855E+0 + v = 0.2083150534968778E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1305472386056362E+0 + b = 0.2284970375722366E-1 + v = 0.1262715121590664E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1637327908216477E+0 + b = 0.4812254338288384E-1 + v = 0.1414386128545972E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1972734634149637E+0 + b = 0.7531734457511935E-1 + v = 0.1538740401313898E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2308694653110130E+0 + b = 0.1039043639882017E+0 + v = 0.1642434942331432E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2643899218338160E+0 + b = 0.1334526587117626E+0 + v = 0.1729790609237496E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2977171599622171E+0 + b = 0.1636414868936382E+0 + v = 0.1803505190260828E-3 + leb_tmp, 
start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3307293903032310E+0 + b = 0.1942195406166568E+0 + v = 0.1865475350079657E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3633069198219073E+0 + b = 0.2249752879943753E+0 + v = 0.1917182669679069E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3953346955922727E+0 + b = 0.2557218821820032E+0 + v = 0.1959851709034382E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4267018394184914E+0 + b = 0.2862897925213193E+0 + v = 0.1994529548117882E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4573009622571704E+0 + b = 0.3165224536636518E+0 + v = 0.2022138911146548E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4870279559856109E+0 + b = 0.3462730221636496E+0 + v = 0.2043518024208592E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5157819581450322E+0 + b = 0.3754016870282835E+0 + v = 0.2059450313018110E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5434651666465393E+0 + b = 0.4037733784993613E+0 + v = 0.2070685715318472E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5699823887764627E+0 + b = 0.4312557784139123E+0 + v = 0.2077955310694373E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5952403350947741E+0 + b = 0.4577175367122110E+0 + v = 0.2081980387824712E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2025152599210369E+0 + b = 0.2520253617719557E-1 + v = 0.1521318610377956E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2381066653274425E+0 + b = 0.5223254506119000E-1 + v = 0.1622772720185755E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + 
a = 0.2732823383651612E+0 + b = 0.8060669688588620E-1 + v = 0.1710498139420709E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3080137692611118E+0 + b = 0.1099335754081255E+0 + v = 0.1785911149448736E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3422405614587601E+0 + b = 0.1399120955959857E+0 + v = 0.1850125313687736E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3758808773890420E+0 + b = 0.1702977801651705E+0 + v = 0.1904229703933298E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4088458383438932E+0 + b = 0.2008799256601680E+0 + v = 0.1949259956121987E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4410450550841152E+0 + b = 0.2314703052180836E+0 + v = 0.1986161545363960E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4723879420561312E+0 + b = 0.2618972111375892E+0 + v = 0.2015790585641370E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5027843561874343E+0 + b = 0.2920013195600270E+0 + v = 0.2038934198707418E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5321453674452458E+0 + b = 0.3216322555190551E+0 + v = 0.2056334060538251E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5603839113834030E+0 + b = 0.3506456615934198E+0 + v = 0.2068705959462289E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5874150706875146E+0 + b = 0.3789007181306267E+0 + v = 0.2076753906106002E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6131559381660038E+0 + b = 0.4062580170572782E+0 + v = 0.2081179391734803E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2778497016394506E+0 + b = 0.2696271276876226E-1 + v = 
0.1700345216228943E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3143733562261912E+0 + b = 0.5523469316960465E-1 + v = 0.1774906779990410E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3501485810261827E+0 + b = 0.8445193201626464E-1 + v = 0.1839659377002642E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3851430322303653E+0 + b = 0.1143263119336083E+0 + v = 0.1894987462975169E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4193013979470415E+0 + b = 0.1446177898344475E+0 + v = 0.1941548809452595E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4525585960458567E+0 + b = 0.1751165438438091E+0 + v = 0.1980078427252384E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4848447779622947E+0 + b = 0.2056338306745660E+0 + v = 0.2011296284744488E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5160871208276894E+0 + b = 0.2359965487229226E+0 + v = 0.2035888456966776E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5462112185696926E+0 + b = 0.2660430223139146E+0 + v = 0.2054516325352142E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5751425068101757E+0 + b = 0.2956193664498032E+0 + v = 0.2067831033092635E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6028073872853596E+0 + b = 0.3245763905312779E+0 + v = 0.2076485320284876E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6291338275278409E+0 + b = 0.3527670026206972E+0 + v = 0.2081141439525255E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3541797528439391E+0 + b = 0.2823853479435550E-1 + v = 0.1834383015469222E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3908234972074657E+0 + b = 0.5741296374713106E-1 + v = 0.1889540591777677E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4264408450107590E+0 + b = 0.8724646633650199E-1 + v = 0.1936677023597375E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4609949666553286E+0 + b = 0.1175034422915616E+0 + v = 0.1976176495066504E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4944389496536006E+0 + b = 0.1479755652628428E+0 + v = 0.2008536004560983E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5267194884346086E+0 + b = 0.1784740659484352E+0 + v = 0.2034280351712291E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5577787810220990E+0 + b = 0.2088245700431244E+0 + v = 0.2053944466027758E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5875563763536670E+0 + b = 0.2388628136570763E+0 + v = 0.2068077642882360E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6159910016391269E+0 + b = 0.2684308928769185E+0 + v = 0.2077250949661599E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6430219602956268E+0 + b = 0.2973740761960252E+0 + v = 0.2082062440705320E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4300647036213646E+0 + b = 0.2916399920493977E-1 + v = 0.1934374486546626E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4661486308935531E+0 + b = 0.5898803024755659E-1 + v = 0.1974107010484300E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5009658555287261E+0 + b = 0.8924162698525409E-1 + v = 0.2007129290388658E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.5344824270447704E+0 + b = 0.1197185199637321E+0 + v = 0.2033736947471293E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5666575997416371E+0 + b = 0.1502300756161382E+0 + v = 0.2054287125902493E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5974457471404752E+0 + b = 0.1806004191913564E+0 + v = 0.2069184936818894E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6267984444116886E+0 + b = 0.2106621764786252E+0 + v = 0.2078883689808782E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6546664713575417E+0 + b = 0.2402526932671914E+0 + v = 0.2083886366116359E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5042711004437253E+0 + b = 0.2982529203607657E-1 + v = 0.2006593275470817E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5392127456774380E+0 + b = 0.6008728062339922E-1 + v = 0.2033728426135397E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5726819437668618E+0 + b = 0.9058227674571398E-1 + v = 0.2055008781377608E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6046469254207278E+0 + b = 0.1211219235803400E+0 + v = 0.2070651783518502E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6350716157434952E+0 + b = 0.1515286404791580E+0 + v = 0.2080953335094320E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6639177679185454E+0 + b = 0.1816314681255552E+0 + v = 0.2086284998988521E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5757276040972253E+0 + b = 0.3026991752575440E-1 + v = 0.2055549387644668E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6090265823139755E+0 + b = 0.6078402297870770E-1 + v = 
0.2071871850267654E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6406735344387661E+0 + b = 0.9135459984176636E-1 + v = 0.2082856600431965E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6706397927793709E+0 + b = 0.1218024155966590E+0 + v = 0.2088705858819358E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6435019674426665E+0 + b = 0.3052608357660639E-1 + v = 0.2083995867536322E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6747218676375681E+0 + b = 0.6112185773983089E-1 + v = 0.2090509712889637E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case 5810: + + v = 0.9735347946175486E-5 + leb_tmp, start = get_lebedev_recurrence_points(1, start, a, b, v, leb_tmp) + v = 0.1907581241803167E-3 + leb_tmp, start = get_lebedev_recurrence_points(2, start, a, b, v, leb_tmp) + v = 0.1901059546737578E-3 + leb_tmp, start = get_lebedev_recurrence_points(3, start, a, b, v, leb_tmp) + a = 0.1182361662400277E-1 + v = 0.3926424538919212E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3062145009138958E-1 + v = 0.6667905467294382E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5329794036834243E-1 + v = 0.8868891315019135E-4 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7848165532862220E-1 + v = 0.1066306000958872E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1054038157636201E+0 + v = 0.1214506743336128E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1335577797766211E+0 + v = 0.1338054681640871E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.1625769955502252E+0 + v = 0.1441677023628504E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, 
leb_tmp) + a = 0.1921787193412792E+0 + v = 0.1528880200826557E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2221340534690548E+0 + v = 0.1602330623773609E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2522504912791132E+0 + v = 0.1664102653445244E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.2823610860679697E+0 + v = 0.1715845854011323E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3123173966267560E+0 + v = 0.1758901000133069E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3419847036953789E+0 + v = 0.1794382485256736E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3712386456999758E+0 + v = 0.1823238106757407E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3999627649876828E+0 + v = 0.1846293252959976E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4280466458648093E+0 + v = 0.1864284079323098E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4553844360185711E+0 + v = 0.1877882694626914E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.4818736094437834E+0 + v = 0.1887716321852025E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5074138709260629E+0 + v = 0.1894381638175673E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5319061304570707E+0 + v = 0.1898454899533629E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5552514978677286E+0 + v = 0.1900497929577815E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.5981009025246183E+0 + v = 0.1900671501924092E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, 
leb_tmp) + a = 0.6173990192228116E+0 + v = 0.1899837555533510E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6351365239411131E+0 + v = 0.1899014113156229E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6512010228227200E+0 + v = 0.1898581257705106E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6654758363948120E+0 + v = 0.1898804756095753E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6778410414853370E+0 + v = 0.1899793610426402E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6881760887484110E+0 + v = 0.1901464554844117E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.6963645267094598E+0 + v = 0.1903533246259542E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7023010617153579E+0 + v = 0.1905556158463228E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.7059004636628753E+0 + v = 0.1907037155663528E-3 + leb_tmp, start = get_lebedev_recurrence_points(4, start, a, b, v, leb_tmp) + a = 0.3552470312472575E-1 + v = 0.5992997844249967E-4 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.9151176620841283E-1 + v = 0.9749059382456978E-4 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.1566197930068980E+0 + v = 0.1241680804599158E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2265467599271907E+0 + v = 0.1437626154299360E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.2988242318581361E+0 + v = 0.1584200054793902E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.3717482419703886E+0 + v = 0.1694436550982744E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, 
leb_tmp) + a = 0.4440094491758889E+0 + v = 0.1776617014018108E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5145337096756642E+0 + v = 0.1836132434440077E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.5824053672860230E+0 + v = 0.1876494727075983E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6468283961043370E+0 + v = 0.1899906535336482E-3 + leb_tmp, start = get_lebedev_recurrence_points(5, start, a, b, v, leb_tmp) + a = 0.6095964259104373E-1 + b = 0.1787828275342931E-1 + v = 0.8143252820767350E-4 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.8811962270959388E-1 + b = 0.3953888740792096E-1 + v = 0.9998859890887728E-4 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1165936722428831E+0 + b = 0.6378121797722990E-1 + v = 0.1156199403068359E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1460232857031785E+0 + b = 0.8985890813745037E-1 + v = 0.1287632092635513E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1761197110181755E+0 + b = 0.1172606510576162E+0 + v = 0.1398378643365139E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2066471190463718E+0 + b = 0.1456102876970995E+0 + v = 0.1491876468417391E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2374076026328152E+0 + b = 0.1746153823011775E+0 + v = 0.1570855679175456E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2682305474337051E+0 + b = 0.2040383070295584E+0 + v = 0.1637483948103775E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2989653312142369E+0 + b = 0.2336788634003698E+0 + v = 0.1693500566632843E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.3294762752772209E+0 + b = 0.2633632752654219E+0 + v = 0.1740322769393633E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3596390887276086E+0 + b = 0.2929369098051601E+0 + v = 0.1779126637278296E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3893383046398812E+0 + b = 0.3222592785275512E+0 + v = 0.1810908108835412E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4184653789358347E+0 + b = 0.3512004791195743E+0 + v = 0.1836529132600190E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4469172319076166E+0 + b = 0.3796385677684537E+0 + v = 0.1856752841777379E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4745950813276976E+0 + b = 0.4074575378263879E+0 + v = 0.1872270566606832E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5014034601410262E+0 + b = 0.4345456906027828E+0 + v = 0.1883722645591307E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5272493404551239E+0 + b = 0.4607942515205134E+0 + v = 0.1891714324525297E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5520413051846366E+0 + b = 0.4860961284181720E+0 + v = 0.1896827480450146E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5756887237503077E+0 + b = 0.5103447395342790E+0 + v = 0.1899628417059528E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1225039430588352E+0 + b = 0.2136455922655793E-1 + v = 0.1123301829001669E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1539113217321372E+0 + b = 0.4520926166137188E-1 + v = 0.1253698826711277E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1856213098637712E+0 + b = 0.7086468177864818E-1 + v = 
0.1366266117678531E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2174998728035131E+0 + b = 0.9785239488772918E-1 + v = 0.1462736856106918E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2494128336938330E+0 + b = 0.1258106396267210E+0 + v = 0.1545076466685412E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2812321562143480E+0 + b = 0.1544529125047001E+0 + v = 0.1615096280814007E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3128372276456111E+0 + b = 0.1835433512202753E+0 + v = 0.1674366639741759E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3441145160177973E+0 + b = 0.2128813258619585E+0 + v = 0.1724225002437900E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3749567714853510E+0 + b = 0.2422913734880829E+0 + v = 0.1765810822987288E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4052621732015610E+0 + b = 0.2716163748391453E+0 + v = 0.1800104126010751E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4349335453522385E+0 + b = 0.3007127671240280E+0 + v = 0.1827960437331284E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4638776641524965E+0 + b = 0.3294470677216479E+0 + v = 0.1850140300716308E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4920046410462687E+0 + b = 0.3576932543699155E+0 + v = 0.1867333507394938E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5192273554861704E+0 + b = 0.3853307059757764E+0 + v = 0.1880178688638289E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5454609081136522E+0 + b = 0.4122425044452694E+0 + v = 0.1889278925654758E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5706220661424140E+0 + b = 0.4383139587781027E+0 + v = 0.1895213832507346E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5946286755181518E+0 + b = 0.4634312536300553E+0 + v = 0.1898548277397420E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.1905370790924295E+0 + b = 0.2371311537781979E-1 + v = 0.1349105935937341E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2242518717748009E+0 + b = 0.4917878059254806E-1 + v = 0.1444060068369326E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2577190808025936E+0 + b = 0.7595498960495142E-1 + v = 0.1526797390930008E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2908724534927187E+0 + b = 0.1036991083191100E+0 + v = 0.1598208771406474E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3236354020056219E+0 + b = 0.1321348584450234E+0 + v = 0.1659354368615331E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3559267359304543E+0 + b = 0.1610316571314789E+0 + v = 0.1711279910946440E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3876637123676956E+0 + b = 0.1901912080395707E+0 + v = 0.1754952725601440E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4187636705218842E+0 + b = 0.2194384950137950E+0 + v = 0.1791247850802529E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4491449019883107E+0 + b = 0.2486155334763858E+0 + v = 0.1820954300877716E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4787270932425445E+0 + b = 0.2775768931812335E+0 + v = 0.1844788524548449E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.5074315153055574E+0 + b = 0.3061863786591120E+0 + v = 0.1863409481706220E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5351810507738336E+0 + b = 0.3343144718152556E+0 + v = 0.1877433008795068E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5619001025975381E+0 + b = 0.3618362729028427E+0 + v = 0.1887444543705232E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5875144035268046E+0 + b = 0.3886297583620408E+0 + v = 0.1894009829375006E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6119507308734495E+0 + b = 0.4145742277792031E+0 + v = 0.1897683345035198E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2619733870119463E+0 + b = 0.2540047186389353E-1 + v = 0.1517327037467653E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.2968149743237949E+0 + b = 0.5208107018543989E-1 + v = 0.1587740557483543E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3310451504860488E+0 + b = 0.7971828470885599E-1 + v = 0.1649093382274097E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3646215567376676E+0 + b = 0.1080465999177927E+0 + v = 0.1701915216193265E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3974916785279360E+0 + b = 0.1368413849366629E+0 + v = 0.1746847753144065E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4295967403772029E+0 + b = 0.1659073184763559E+0 + v = 0.1784555512007570E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4608742854473447E+0 + b = 0.1950703730454614E+0 + v = 0.1815687562112174E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4912598858949903E+0 + b = 0.2241721144376724E+0 + v = 
0.1840864370663302E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5206882758945558E+0 + b = 0.2530655255406489E+0 + v = 0.1860676785390006E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5490940914019819E+0 + b = 0.2816118409731066E+0 + v = 0.1875690583743703E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5764123302025542E+0 + b = 0.3096780504593238E+0 + v = 0.1886453236347225E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6025786004213506E+0 + b = 0.3371348366394987E+0 + v = 0.1893501123329645E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6275291964794956E+0 + b = 0.3638547827694396E+0 + v = 0.1897366184519868E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3348189479861771E+0 + b = 0.2664841935537443E-1 + v = 0.1643908815152736E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.3699515545855295E+0 + b = 0.5424000066843495E-1 + v = 0.1696300350907768E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4042003071474669E+0 + b = 0.8251992715430854E-1 + v = 0.1741553103844483E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4375320100182624E+0 + b = 0.1112695182483710E+0 + v = 0.1780015282386092E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4699054490335947E+0 + b = 0.1402964116467816E+0 + v = 0.1812116787077125E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5012739879431952E+0 + b = 0.1694275117584291E+0 + v = 0.1838323158085421E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5315874883754966E+0 + b = 0.1985038235312689E+0 + v = 0.1859113119837737E-3 + leb_tmp, start = 
get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5607937109622117E+0 + b = 0.2273765660020893E+0 + v = 0.1874969220221698E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5888393223495521E+0 + b = 0.2559041492849764E+0 + v = 0.1886375612681076E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6156705979160163E+0 + b = 0.2839497251976899E+0 + v = 0.1893819575809276E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6412338809078123E+0 + b = 0.3113791060500690E+0 + v = 0.1897794748256767E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4076051259257167E+0 + b = 0.2757792290858463E-1 + v = 0.1738963926584846E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4423788125791520E+0 + b = 0.5584136834984293E-1 + v = 0.1777442359873466E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4760480917328258E+0 + b = 0.8457772087727143E-1 + v = 0.1810010815068719E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5085838725946297E+0 + b = 0.1135975846359248E+0 + v = 0.1836920318248129E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5399513637391218E+0 + b = 0.1427286904765053E+0 + v = 0.1858489473214328E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5701118433636380E+0 + b = 0.1718112740057635E+0 + v = 0.1875079342496592E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5990240530606021E+0 + b = 0.2006944855985351E+0 + v = 0.1887080239102310E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6266452685139695E+0 + b = 0.2292335090598907E+0 + v = 0.1894905752176822E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 
0.6529320971415942E+0 + b = 0.2572871512353714E+0 + v = 0.1898991061200695E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.4791583834610126E+0 + b = 0.2826094197735932E-1 + v = 0.1809065016458791E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5130373952796940E+0 + b = 0.5699871359683649E-1 + v = 0.1836297121596799E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5456252429628476E+0 + b = 0.8602712528554394E-1 + v = 0.1858426916241869E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5768956329682385E+0 + b = 0.1151748137221281E+0 + v = 0.1875654101134641E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6068186944699046E+0 + b = 0.1442811654136362E+0 + v = 0.1888240751833503E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6353622248024907E+0 + b = 0.1731930321657680E+0 + v = 0.1896497383866979E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6624927035731797E+0 + b = 0.2017619958756061E+0 + v = 0.1900775530219121E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5484933508028488E+0 + b = 0.2874219755907391E-1 + v = 0.1858525041478814E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.5810207682142106E+0 + b = 0.5778312123713695E-1 + v = 0.1876248690077947E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6120955197181352E+0 + b = 0.8695262371439526E-1 + v = 0.1889404439064607E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6416944284294319E+0 + b = 0.1160893767057166E+0 + v = 0.1898168539265290E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6697926391731260E+0 + b = 0.1450378826743251E+0 + v = 
0.1902779940661772E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6147594390585488E+0 + b = 0.2904957622341456E-1 + v = 0.1890125641731815E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6455390026356783E+0 + b = 0.5823809152617197E-1 + v = 0.1899434637795751E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6747258588365477E+0 + b = 0.8740384899884715E-1 + v = 0.1904520856831751E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + a = 0.6772135750395347E+0 + b = 0.2919946135808105E-1 + v = 0.1905534498734563E-3 + leb_tmp, start = get_lebedev_recurrence_points(6, start, a, b, v, leb_tmp) + + case _: + raise Exception('Angular grid unrecognized, choices are 6, 14, 26, 38, 50, 74, 86, 110, 146, 170, 194, 230, 266, 302, 350, 434, 590, 770, 974, 1202, 1454, 1730, 2030, 2354, 2702, 3074, 3470, 3890, 4334, 4802, 5294, 5810') # noqa: E501 + + leb_tmp.n = degree + return leb_tmp + + +def get_lebedev_recurrence_points(type_, start, a, b, v, leb): + c = 0.0 + + match type_: + + case 1: + a = 1.0 + + leb.x[start] = a + leb.y[start] = 0.0 + leb.z[start] = 0.0 + leb.w[start] = 4.0 * pi * v + + leb.x[start + 1] = -a + leb.y[start + 1] = 0.0 + leb.z[start + 1] = 0.0 + leb.w[start + 1] = 4.0 * pi * v + + leb.x[start + 2] = 0.0 + leb.y[start + 2] = a + leb.z[start + 2] = 0.0 + leb.w[start + 2] = 4.0 * pi * v + + leb.x[start + 3] = 0.0 + leb.y[start + 3] = -a + leb.z[start + 3] = 0.0 + leb.w[start + 3] = 4.0 * pi * v + + leb.x[start + 4] = 0.0 + leb.y[start + 4] = 0.0 + leb.z[start + 4] = a + leb.w[start + 4] = 4.0 * pi * v + + leb.x[start + 5] = 0.0 + leb.y[start + 5] = 0.0 + leb.z[start + 5] = -a + leb.w[start + 5] = 4.0 * pi * v + start = start + 6 + + case 2: + a = sqrt(0.5) + leb.x[start] = 0.0 + leb.y[start] = a + leb.z[start] = a + leb.w[start] = 4.0 * pi * v + + leb.x[start + 1] = 0.0 + leb.y[start + 1] = -a + leb.z[start + 1] = 
a + leb.w[start + 1] = 4.0 * pi * v + + leb.x[start + 2] = 0.0 + leb.y[start + 2] = a + leb.z[start + 2] = -a + leb.w[start + 2] = 4.0 * pi * v + + leb.x[start + 3] = 0.0 + leb.y[start + 3] = -a + leb.z[start + 3] = -a + leb.w[start + 3] = 4.0 * pi * v + + leb.x[start + 4] = a + leb.y[start + 4] = 0.0 + leb.z[start + 4] = a + leb.w[start + 4] = 4.0 * pi * v + + leb.x[start + 5] = a + leb.y[start + 5] = 0.0 + leb.z[start + 5] = -a + leb.w[start + 5] = 4.0 * pi * v + + leb.x[start + 6] = -a + leb.y[start + 6] = 0.0 + leb.z[start + 6] = a + leb.w[start + 6] = 4.0 * pi * v + + leb.x[start + 7] = -a + leb.y[start + 7] = 0.0 + leb.z[start + 7] = -a + leb.w[start + 7] = 4.0 * pi * v + + leb.x[start + 8] = a + leb.y[start + 8] = a + leb.z[start + 8] = 0.0 + leb.w[start + 8] = 4.0 * pi * v + + leb.x[start + 9] = -a + leb.y[start + 9] = a + leb.z[start + 9] = 0.0 + leb.w[start + 9] = 4.0 * pi * v + + leb.x[start + 10] = a + leb.y[start + 10] = -a + leb.z[start + 10] = 0.0 + leb.w[start + 10] = 4.0 * pi * v + + leb.x[start + 11] = -a + leb.y[start + 11] = -a + leb.z[start + 11] = 0.0 + leb.w[start + 11] = 4.0 * pi * v + start = start + 12 + + case 3: + a = sqrt(1.0 / 3.0) + leb.x[start] = a + leb.y[start] = a + leb.z[start] = a + leb.w[start] = 4.0 * pi * v + + leb.x[start + 1] = -a + leb.y[start + 1] = a + leb.z[start + 1] = a + leb.w[start + 1] = 4.0 * pi * v + + leb.x[start + 2] = a + leb.y[start + 2] = -a + leb.z[start + 2] = a + leb.w[start + 2] = 4.0 * pi * v + + leb.x[start + 3] = a + leb.y[start + 3] = a + leb.z[start + 3] = -a + leb.w[start + 3] = 4.0 * pi * v + + leb.x[start + 4] = -a + leb.y[start + 4] = -a + leb.z[start + 4] = a + leb.w[start + 4] = 4.0 * pi * v + + leb.x[start + 5] = a + leb.y[start + 5] = -a + leb.z[start + 5] = -a + leb.w[start + 5] = 4.0 * pi * v + + leb.x[start + 6] = -a + leb.y[start + 6] = a + leb.z[start + 6] = -a + leb.w[start + 6] = 4.0 * pi * v + + leb.x[start + 7] = -a + leb.y[start + 7] = -a + leb.z[start + 7] = -a + leb.w[start + 7] 
= 4.0 * pi * v + start = start + 8 + + case 4: + # /* In this case A is inputed */ + b = sqrt(1.0 - 2.0 * a * a) + leb.x[start] = a + leb.y[start] = a + leb.z[start] = b + leb.w[start] = 4.0 * pi * v + + leb.x[start + 1] = -a + leb.y[start + 1] = a + leb.z[start + 1] = b + leb.w[start + 1] = 4.0 * pi * v + + leb.x[start + 2] = a + leb.y[start + 2] = -a + leb.z[start + 2] = b + leb.w[start + 2] = 4.0 * pi * v + + leb.x[start + 3] = a + leb.y[start + 3] = a + leb.z[start + 3] = -b + leb.w[start + 3] = 4.0 * pi * v + + leb.x[start + 4] = -a + leb.y[start + 4] = -a + leb.z[start + 4] = b + leb.w[start + 4] = 4.0 * pi * v + + leb.x[start + 5] = -a + leb.y[start + 5] = a + leb.z[start + 5] = -b + leb.w[start + 5] = 4.0 * pi * v + + leb.x[start + 6] = a + leb.y[start + 6] = -a + leb.z[start + 6] = -b + leb.w[start + 6] = 4.0 * pi * v + + leb.x[start + 7] = -a + leb.y[start + 7] = -a + leb.z[start + 7] = -b + leb.w[start + 7] = 4.0 * pi * v + + leb.x[start + 8] = -a + leb.y[start + 8] = b + leb.z[start + 8] = a + leb.w[start + 8] = 4.0 * pi * v + + leb.x[start + 9] = a + leb.y[start + 9] = -b + leb.z[start + 9] = a + leb.w[start + 9] = 4.0 * pi * v + + leb.x[start + 10] = a + leb.y[start + 10] = b + leb.z[start + 10] = -a + leb.w[start + 10] = 4.0 * pi * v + + leb.x[start + 11] = -a + leb.y[start + 11] = -b + leb.z[start + 11] = a + leb.w[start + 11] = 4.0 * pi * v + + leb.x[start + 12] = -a + leb.y[start + 12] = b + leb.z[start + 12] = -a + leb.w[start + 12] = 4.0 * pi * v + + leb.x[start + 13] = a + leb.y[start + 13] = -b + leb.z[start + 13] = -a + leb.w[start + 13] = 4.0 * pi * v + + leb.x[start + 14] = -a + leb.y[start + 14] = -b + leb.z[start + 14] = -a + leb.w[start + 14] = 4.0 * pi * v + + leb.x[start + 15] = a + leb.y[start + 15] = b + leb.z[start + 15] = a + leb.w[start + 15] = 4.0 * pi * v + + leb.x[start + 16] = b + leb.y[start + 16] = a + leb.z[start + 16] = a + leb.w[start + 16] = 4.0 * pi * v + + leb.x[start + 17] = -b + leb.y[start + 17] = a + leb.z[start + 
17] = a + leb.w[start + 17] = 4.0 * pi * v + + leb.x[start + 18] = b + leb.y[start + 18] = -a + leb.z[start + 18] = a + leb.w[start + 18] = 4.0 * pi * v + + leb.x[start + 19] = b + leb.y[start + 19] = a + leb.z[start + 19] = -a + leb.w[start + 19] = 4.0 * pi * v + + leb.x[start + 20] = -b + leb.y[start + 20] = -a + leb.z[start + 20] = a + leb.w[start + 20] = 4.0 * pi * v + + leb.x[start + 21] = -b + leb.y[start + 21] = a + leb.z[start + 21] = -a + leb.w[start + 21] = 4.0 * pi * v + + leb.x[start + 22] = b + leb.y[start + 22] = -a + leb.z[start + 22] = -a + leb.w[start + 22] = 4.0 * pi * v + + leb.x[start + 23] = -b + leb.y[start + 23] = -a + leb.z[start + 23] = -a + leb.w[start + 23] = 4.0 * pi * v + start = start + 24 + + case 5: + # /* A is inputed in this case as well*/ + b = sqrt(1 - a * a) + leb.x[start] = a + leb.y[start] = b + leb.z[start] = 0.0 + leb.w[start] = 4.0 * pi * v + + leb.x[start + 1] = -a + leb.y[start + 1] = b + leb.z[start + 1] = 0.0 + leb.w[start + 1] = 4.0 * pi * v + + leb.x[start + 2] = a + leb.y[start + 2] = -b + leb.z[start + 2] = 0.0 + leb.w[start + 2] = 4.0 * pi * v + + leb.x[start + 3] = -a + leb.y[start + 3] = -b + leb.z[start + 3] = 0.0 + leb.w[start + 3] = 4.0 * pi * v + + leb.x[start + 4] = b + leb.y[start + 4] = a + leb.z[start + 4] = 0.0 + leb.w[start + 4] = 4.0 * pi * v + + leb.x[start + 5] = -b + leb.y[start + 5] = a + leb.z[start + 5] = 0.0 + leb.w[start + 5] = 4.0 * pi * v + + leb.x[start + 6] = b + leb.y[start + 6] = -a + leb.z[start + 6] = 0.0 + leb.w[start + 6] = 4.0 * pi * v + + leb.x[start + 7] = -b + leb.y[start + 7] = -a + leb.z[start + 7] = 0.0 + leb.w[start + 7] = 4.0 * pi * v + + leb.x[start + 8] = a + leb.y[start + 8] = 0.0 + leb.z[start + 8] = b + leb.w[start + 8] = 4.0 * pi * v + + leb.x[start + 9] = -a + leb.y[start + 9] = 0.0 + leb.z[start + 9] = b + leb.w[start + 9] = 4.0 * pi * v + + leb.x[start + 10] = a + leb.y[start + 10] = 0.0 + leb.z[start + 10] = -b + leb.w[start + 10] = 4.0 * pi * v + + leb.x[start + 
11] = -a + leb.y[start + 11] = 0.0 + leb.z[start + 11] = -b + leb.w[start + 11] = 4.0 * pi * v + + leb.x[start + 12] = b + leb.y[start + 12] = 0.0 + leb.z[start + 12] = a + leb.w[start + 12] = 4.0 * pi * v + + leb.x[start + 13] = -b + leb.y[start + 13] = 0.0 + leb.z[start + 13] = a + leb.w[start + 13] = 4.0 * pi * v + + leb.x[start + 14] = b + leb.y[start + 14] = 0.0 + leb.z[start + 14] = -a + leb.w[start + 14] = 4.0 * pi * v + + leb.x[start + 15] = -b + leb.y[start + 15] = 0.0 + leb.z[start + 15] = -a + leb.w[start + 15] = 4.0 * pi * v + + leb.x[start + 16] = 0.0 + leb.y[start + 16] = a + leb.z[start + 16] = b + leb.w[start + 16] = 4.0 * pi * v + + leb.x[start + 17] = 0.0 + leb.y[start + 17] = -a + leb.z[start + 17] = b + leb.w[start + 17] = 4.0 * pi * v + + leb.x[start + 18] = 0.0 + leb.y[start + 18] = a + leb.z[start + 18] = -b + leb.w[start + 18] = 4.0 * pi * v + + leb.x[start + 19] = 0.0 + leb.y[start + 19] = -a + leb.z[start + 19] = -b + leb.w[start + 19] = 4.0 * pi * v + + leb.x[start + 20] = 0.0 + leb.y[start + 20] = b + leb.z[start + 20] = a + leb.w[start + 20] = 4.0 * pi * v + + leb.x[start + 21] = 0.0 + leb.y[start + 21] = -b + leb.z[start + 21] = a + leb.w[start + 21] = 4.0 * pi * v + + leb.x[start + 22] = 0.0 + leb.y[start + 22] = b + leb.z[start + 22] = -a + leb.w[start + 22] = 4.0 * pi * v + + leb.x[start + 23] = 0.0 + leb.y[start + 23] = -b + leb.z[start + 23] = -a + leb.w[start + 23] = 4.0 * pi * v + start = start + 24 + + case 6: + # /* both A and B are inputed in this case */ + c = sqrt(1.0 - a * a - b * b) + leb.x[start] = a + leb.y[start] = b + leb.z[start] = c + leb.w[start] = 4.0 * pi * v + + leb.x[start + 1] = -a + leb.y[start + 1] = b + leb.z[start + 1] = c + leb.w[start + 1] = 4.0 * pi * v + + leb.x[start + 2] = a + leb.y[start + 2] = -b + leb.z[start + 2] = c + leb.w[start + 2] = 4.0 * pi * v + + leb.x[start + 3] = a + leb.y[start + 3] = b + leb.z[start + 3] = -c + leb.w[start + 3] = 4.0 * pi * v + + leb.x[start + 4] = -a + leb.y[start + 
4] = -b + leb.z[start + 4] = c + leb.w[start + 4] = 4.0 * pi * v + + leb.x[start + 5] = a + leb.y[start + 5] = -b + leb.z[start + 5] = -c + leb.w[start + 5] = 4.0 * pi * v + + leb.x[start + 6] = -a + leb.y[start + 6] = b + leb.z[start + 6] = -c + leb.w[start + 6] = 4.0 * pi * v + + leb.x[start + 7] = -a + leb.y[start + 7] = -b + leb.z[start + 7] = -c + leb.w[start + 7] = 4.0 * pi * v + + leb.x[start + 8] = b + leb.y[start + 8] = a + leb.z[start + 8] = c + leb.w[start + 8] = 4.0 * pi * v + + leb.x[start + 9] = -b + leb.y[start + 9] = a + leb.z[start + 9] = c + leb.w[start + 9] = 4.0 * pi * v + + leb.x[start + 10] = b + leb.y[start + 10] = -a + leb.z[start + 10] = c + leb.w[start + 10] = 4.0 * pi * v + + leb.x[start + 11] = b + leb.y[start + 11] = a + leb.z[start + 11] = -c + leb.w[start + 11] = 4.0 * pi * v + + leb.x[start + 12] = -b + leb.y[start + 12] = -a + leb.z[start + 12] = c + leb.w[start + 12] = 4.0 * pi * v + + leb.x[start + 13] = b + leb.y[start + 13] = -a + leb.z[start + 13] = -c + leb.w[start + 13] = 4.0 * pi * v + + leb.x[start + 14] = -b + leb.y[start + 14] = a + leb.z[start + 14] = -c + leb.w[start + 14] = 4.0 * pi * v + + leb.x[start + 15] = -b + leb.y[start + 15] = -a + leb.z[start + 15] = -c + leb.w[start + 15] = 4.0 * pi * v + + leb.x[start + 16] = c + leb.y[start + 16] = a + leb.z[start + 16] = b + leb.w[start + 16] = 4.0 * pi * v + + leb.x[start + 17] = -c + leb.y[start + 17] = a + leb.z[start + 17] = b + leb.w[start + 17] = 4.0 * pi * v + + leb.x[start + 18] = c + leb.y[start + 18] = -a + leb.z[start + 18] = b + leb.w[start + 18] = 4.0 * pi * v + + leb.x[start + 19] = c + leb.y[start + 19] = a + leb.z[start + 19] = -b + leb.w[start + 19] = 4.0 * pi * v + + leb.x[start + 20] = -c + leb.y[start + 20] = -a + leb.z[start + 20] = b + leb.w[start + 20] = 4.0 * pi * v + + leb.x[start + 21] = c + leb.y[start + 21] = -a + leb.z[start + 21] = -b + leb.w[start + 21] = 4.0 * pi * v + + leb.x[start + 22] = -c + leb.y[start + 22] = a + leb.z[start + 22] = -b 
+ leb.w[start + 22] = 4.0 * pi * v + + leb.x[start + 23] = -c + leb.y[start + 23] = -a + leb.z[start + 23] = -b + leb.w[start + 23] = 4.0 * pi * v + + leb.x[start + 24] = c + leb.y[start + 24] = b + leb.z[start + 24] = a + leb.w[start + 24] = 4.0 * pi * v + + leb.x[start + 25] = -c + leb.y[start + 25] = b + leb.z[start + 25] = a + leb.w[start + 25] = 4.0 * pi * v + + leb.x[start + 26] = c + leb.y[start + 26] = -b + leb.z[start + 26] = a + leb.w[start + 26] = 4.0 * pi * v + + leb.x[start + 27] = c + leb.y[start + 27] = b + leb.z[start + 27] = -a + leb.w[start + 27] = 4.0 * pi * v + + leb.x[start + 28] = -c + leb.y[start + 28] = -b + leb.z[start + 28] = a + leb.w[start + 28] = 4.0 * pi * v + + leb.x[start + 29] = c + leb.y[start + 29] = -b + leb.z[start + 29] = -a + leb.w[start + 29] = 4.0 * pi * v + + leb.x[start + 30] = -c + leb.y[start + 30] = b + leb.z[start + 30] = -a + leb.w[start + 30] = 4.0 * pi * v + + leb.x[start + 31] = -c + leb.y[start + 31] = -b + leb.z[start + 31] = -a + leb.w[start + 31] = 4.0 * pi * v + + leb.x[start + 32] = a + leb.y[start + 32] = c + leb.z[start + 32] = b + leb.w[start + 32] = 4.0 * pi * v + + leb.x[start + 33] = -a + leb.y[start + 33] = c + leb.z[start + 33] = b + leb.w[start + 33] = 4.0 * pi * v + + leb.x[start + 34] = a + leb.y[start + 34] = -c + leb.z[start + 34] = b + leb.w[start + 34] = 4.0 * pi * v + + leb.x[start + 35] = a + leb.y[start + 35] = c + leb.z[start + 35] = -b + leb.w[start + 35] = 4.0 * pi * v + + leb.x[start + 36] = -a + leb.y[start + 36] = -c + leb.z[start + 36] = b + leb.w[start + 36] = 4.0 * pi * v + + leb.x[start + 37] = a + leb.y[start + 37] = -c + leb.z[start + 37] = -b + leb.w[start + 37] = 4.0 * pi * v + + leb.x[start + 38] = -a + leb.y[start + 38] = c + leb.z[start + 38] = -b + leb.w[start + 38] = 4.0 * pi * v + + leb.x[start + 39] = -a + leb.y[start + 39] = -c + leb.z[start + 39] = -b + leb.w[start + 39] = 4.0 * pi * v + + leb.x[start + 40] = b + leb.y[start + 40] = c + leb.z[start + 40] = a + 
leb.w[start + 40] = 4.0 * pi * v + + leb.x[start + 41] = -b + leb.y[start + 41] = c + leb.z[start + 41] = a + leb.w[start + 41] = 4.0 * pi * v + + leb.x[start + 42] = b + leb.y[start + 42] = -c + leb.z[start + 42] = a + leb.w[start + 42] = 4.0 * pi * v + + leb.x[start + 43] = b + leb.y[start + 43] = c + leb.z[start + 43] = -a + leb.w[start + 43] = 4.0 * pi * v + + leb.x[start + 44] = -b + leb.y[start + 44] = -c + leb.z[start + 44] = a + leb.w[start + 44] = 4.0 * pi * v + + leb.x[start + 45] = b + leb.y[start + 45] = -c + leb.z[start + 45] = -a + leb.w[start + 45] = 4.0 * pi * v + + leb.x[start + 46] = -b + leb.y[start + 46] = c + leb.z[start + 46] = -a + leb.w[start + 46] = 4.0 * pi * v + + leb.x[start + 47] = -b + leb.y[start + 47] = -c + leb.z[start + 47] = -a + leb.w[start + 47] = 4.0 * pi * v + start = start + 48 + + case _: + raise Exception('Bad grid order') + + return leb, start + + +def lebedev_rule(n): + r"""Lebedev quadrature. + + Compute the sample points and weights for Lebedev quadrature [1]_ + for integration of a function over the surface of a unit sphere. + + Parameters + ---------- + n : int + Quadrature order. See Notes for supported values. + + Returns + ------- + x : ndarray of shape ``(3, m)`` + Sample points on the unit sphere in Cartesian coordinates. + ``m`` is the "degree" corresponding with the specified order; see Notes. + w : ndarray of shape ``(m,)`` + Weights + + Notes + ----- + Implemented by translating the Matlab code of [2]_ to Python. + + The available orders (argument `n`) are:: + + 3, 5, 7, 9, 11, 13, 15, 17, + 19, 21, 23, 25, 27, 29, 31, 35, + 41, 47, 53, 59, 65, 71, 77, 83, + 89, 95, 101, 107, 113, 119, 125, 131 + + The corresponding degrees ``m`` are:: + + 6, 14, 26, 38, 50, 74, 86, 110, + 146, 170, 194, 230, 266, 302, 350, 434, + 590, 770, 974, 1202, 1454, 1730, 2030, 2354, + 2702, 3074, 3470, 3890, 4334, 4802, 5294, 5810 + + References + ---------- + .. [1] V.I. Lebedev, and D.N. Laikov. 
"A quadrature formula for the sphere of + the 131st algebraic order of accuracy". Doklady Mathematics, Vol. 59, + No. 3, 1999, pp. 477-481. + .. [2] R. Parrish. ``getLebedevSphere``. Matlab Central File Exchange. + https://www.mathworks.com/matlabcentral/fileexchange/27097-getlebedevsphere. + .. [3] Bellet, Jean-Baptiste, Matthieu Brachet, and Jean-Pierre Croisille. + "Quadrature and symmetry on the Cubed Sphere." Journal of Computational and + Applied Mathematics 409 (2022): 114142. :doi:`10.1016/j.cam.2022.114142`. + + Examples + -------- + An example given in [3]_ is integration of :math:`f(x, y, z) = \exp(x)` over a + sphere of radius :math:`1`; the reference there is ``14.7680137457653``. + Show the convergence to the expected result as the order increases: + + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from scipy.integrate import lebedev_rule + >>> + >>> def f(x): + ... return np.exp(x[0]) + >>> + >>> res = [] + >>> orders = np.arange(3, 20, 2) + >>> for n in orders: + ... x, w = lebedev_rule(n) + ... res.append(w @ f(x)) + >>> + >>> ref = np.full_like(res, 14.7680137457653) + >>> err = abs(res - ref)/abs(ref) + >>> plt.semilogy(orders, err) + >>> plt.xlabel('order $n$') + >>> plt.ylabel('relative error') + >>> plt.title(r'Convergence for $f(x, y, z) = \exp(x)$') + >>> plt.show() + + """ + degree = [6, 14, 26, 38, 50, 74, 86, 110, 146, 170, 194, 230, 266, 302, 350, + 434, 590, 770, 974, 1202, 1454, 1730, 2030, 2354, 2702, 3074, 3470, + 3890, 4334, 4802, 5294, 5810] + order = [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 35, 41, 47, + 53, 59, 65, 71, 77, 83, 89, 95, 101, 107, 113, 119, 125, 131] + order_degree_map = dict(zip(order, degree)) + + if n not in order_degree_map: + message = f"Order {n=} not available. Available orders are {order}." 
+ raise NotImplementedError(message) + + degree = order_degree_map[n] + res = get_lebedev_sphere(degree) + x = np.stack((res.x, res.y, res.z)) + w = res.w + + return x, w diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a95d0b1f63defff00d27bb235f8208fd1426fcb8 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_lsoda.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b386368734a8b4c0c067a7e7a560a3f520121abd1a7f3b0d5307f7ce3ba8714 +size 516865 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ode.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ode.py new file mode 100644 index 0000000000000000000000000000000000000000..72d9da2495da768753f45796e8df1996cd70d382 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_ode.py @@ -0,0 +1,1388 @@ +# Authors: Pearu Peterson, Pauli Virtanen, John Travers +""" +First-order ODE integrators. + +User-friendly interface to various numerical integrators for solving a +system of first order ODEs with prescribed initial conditions:: + + d y(t)[i] + --------- = f(t,y(t))[i], + d t + + y(t=0)[i] = y0[i], + +where:: + + i = 0, ..., len(y0) - 1 + +class ode +--------- + +A generic interface class to numeric integrators. 
It has the following +methods:: + + integrator = ode(f, jac=None) + integrator = integrator.set_integrator(name, **params) + integrator = integrator.set_initial_value(y0, t0=0.0) + integrator = integrator.set_f_params(*args) + integrator = integrator.set_jac_params(*args) + y1 = integrator.integrate(t1, step=False, relax=False) + flag = integrator.successful() + +class complex_ode +----------------- + +This class has the same generic interface as ode, except it can handle complex +f, y and Jacobians by transparently translating them into the equivalent +real-valued system. It supports the real-valued solvers (i.e., not zvode) and is +an alternative to ode with the zvode solver, sometimes performing better. +""" +# XXX: Integrators must have: +# =========================== +# cvode - C version of vode and vodpk with many improvements. +# Get it from http://www.netlib.org/ode/cvode.tar.gz. +# To wrap cvode to Python, one must write the extension module by +# hand. Its interface is too much 'advanced C' that using f2py +# would be too complicated (or impossible). +# +# How to define a new integrator: +# =============================== +# +# class myodeint(IntegratorBase): +# +# runner = or None +# +# def __init__(self,...): # required +# +# +# def reset(self,n,has_jac): # optional +# # n - the size of the problem (number of equations) +# # has_jac - whether user has supplied its own routine for Jacobian +# +# +# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required +# # this method is called to integrate from t=t0 to t=t1 +# # with initial condition y0. f and jac are user-supplied functions +# # that define the problem. f_params,jac_params are additional +# # arguments +# # to these functions. +# +# if : +# self.success = 0 +# return t1,y1 +# +# # In addition, one can define step() and run_relax() methods (they +# # take the same arguments as run()) if the integrator can support +# # these features (see IntegratorBase doc strings). 
+# +# if myodeint.runner: +# IntegratorBase.integrator_classes.append(myodeint) + +__all__ = ['ode', 'complex_ode'] + +import re +import threading +import warnings + +from numpy import asarray, array, zeros, isscalar, real, imag, vstack + +from . import _vode +from . import _dop +from . import _lsoda + + +_dop_int_dtype = _dop.types.intvar.dtype +_vode_int_dtype = _vode.types.intvar.dtype +_lsoda_int_dtype = _lsoda.types.intvar.dtype + + +# lsoda, vode and zvode are not thread-safe. VODE_LOCK protects both vode and +# zvode; they share the `def run` implementation +LSODA_LOCK = threading.Lock() +VODE_LOCK = threading.Lock() + + +# ------------------------------------------------------------------------------ +# User interface +# ------------------------------------------------------------------------------ + + +class ode: + """ + A generic interface class to numeric integrators. + + Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``. + + *Note*: The first two arguments of ``f(t, y, ...)`` are in the + opposite order of the arguments in the system definition function used + by `scipy.integrate.odeint`. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Right-hand side of the differential equation. t is a scalar, + ``y.shape == (n,)``. + ``f_args`` is set by calling ``set_f_params(*args)``. + `f` should return a scalar, array or list (not a tuple). + jac : callable ``jac(t, y, *jac_args)``, optional + Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``. + ``jac_args`` is set by calling ``set_jac_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + See also + -------- + odeint : an integrator with a simpler interface based on lsoda from ODEPACK + quad : for finding the area under a curve + + Notes + ----- + Available integrators are listed below. They can be selected using + the `set_integrator` method. 
+ + "vode" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/vode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "vode" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. + Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The + dimension of the matrix must be (lband+uband+1, len(y)). + - method: 'adams' or 'bdf' + Which solver to use, Adams (non-stiff) or BDF (stiff) + - with_jacobian : bool + This option is only considered when the user has not supplied a + Jacobian function and has not indicated (by setting either band) + that the Jacobian is banded. In this case, `with_jacobian` specifies + whether the iteration method of the ODE solver's correction step is + chord iteration with an internally generated full Jacobian or + functional iteration with no Jacobian. + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - order : int + Maximum order used by the integrator, + order <= 12 for Adams, <= 5 for BDF. + + "zvode" + + Complex-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. 
It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/zvode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "zvode" integrator at the same time. + + This integrator accepts the same parameters in `set_integrator` + as the "vode" solver. + + .. note:: + + When using ZVODE for a stiff system, it should only be used for + the case in which the function f is analytic, that is, when each f(i) + is an analytic function of each y(j). Analyticity means that the + partial derivative df(i)/dy(j) is a unique complex number, and this + fact is critical in the way ZVODE solves the dense or banded linear + systems that arise in the stiff case. For a complex stiff ODE system + in which f is not analytic, ZVODE is likely to have convergence + failures, and for this problem one should instead use DVODE on the + equivalent real system (in the real and imaginary parts of y). + + "lsoda" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + automatic method switching between implicit Adams method (for non-stiff + problems) and a method based on backward differentiation formulas (BDF) + (for stiff problems). + + Source: http://www.netlib.org/odepack + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "lsoda" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. 
+ Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. + - with_jacobian : bool + *Not used.* + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - max_order_ns : int + Maximum order used in the nonstiff case (default 12). + - max_order_s : int + Maximum order used in the stiff case (default 5). + - max_hnil : int + Maximum number of messages reporting too small step size (t + h = t) + (default 0) + - ixpr : int + Whether to generate extra printing at method switches (default False). + + "dopri5" + + This is an explicit runge-kutta method of order (4)5 due to Dormand & + Prince (with stepsize control and dense output). + + Authors: + + E. Hairer and G. Wanner + Universite de Geneve, Dept. de Mathematiques + CH-1211 Geneve 24, Switzerland + e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch + + This code is described in [HNW93]_. + + This integrator accepts the following parameters in set_integrator() + method of the ode class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - max_step : float + - safety : float + Safety factor on new step selection (default 0.9) + - ifactor : float + - dfactor : float + Maximum factor to increase/decrease step size by in one step + - beta : float + Beta parameter for stabilised step size control. + - verbosity : int + Switch for printing messages (< 0 for no messages). + + "dop853" + + This is an explicit runge-kutta method of order 8(5,3) due to Dormand + & Prince (with stepsize control and dense output). + + Options and references the same as "dopri5". 
+ + Examples + -------- + + A problem to integrate and the corresponding jacobian: + + >>> from scipy.integrate import ode + >>> + >>> y0, t0 = [1.0j, 2.0], 0 + >>> + >>> def f(t, y, arg1): + ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2] + >>> def jac(t, y, arg1): + ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]] + + The integration: + + >>> r = ode(f, jac).set_integrator('zvode', method='bdf') + >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0) + >>> t1 = 10 + >>> dt = 1 + >>> while r.successful() and r.t < t1: + ... print(r.t+dt, r.integrate(r.t+dt)) + 1 [-0.71038232+0.23749653j 0.40000271+0.j ] + 2.0 [0.19098503-0.52359246j 0.22222356+0.j ] + 3.0 [0.47153208+0.52701229j 0.15384681+0.j ] + 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ] + 5.0 [0.02340997-0.61418799j 0.09523835+0.j ] + 6.0 [0.58643071+0.339819j 0.08000018+0.j ] + 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ] + 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ] + 9.0 [0.64850462+0.15048982j 0.05405414+0.j ] + 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ] + + References + ---------- + .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary + Differential Equations i. Nonstiff Problems. 2nd edition. + Springer Series in Computational Mathematics, + Springer-Verlag (1993) + + """ + + def __init__(self, f, jac=None): + self.stiff = 0 + self.f = f + self.jac = jac + self.f_params = () + self.jac_params = () + self._y = [] + + @property + def y(self): + return self._y + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + if isscalar(y): + y = [y] + n_prev = len(self._y) + if not n_prev: + self.set_integrator('') # find first available integrator + self._y = asarray(y, self._integrator.scalar) + self.t = t + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator. 
+ **integrator_params + Additional parameters for the integrator. + """ + integrator = find_integrator(name) + if integrator is None: + # FIXME: this really should be raise an exception. Will that break + # any code? + message = f'No integrator name match with {name!r} or is not available.' + warnings.warn(message, stacklevel=2) + else: + self._integrator = integrator(**integrator_params) + if not len(self._y): + self.t = 0.0 + self._y = array([0.0], self._integrator.scalar) + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. + + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + if step and self._integrator.supports_step: + mth = self._integrator.step + elif relax and self._integrator.supports_run_relax: + mth = self._integrator.run_relax + else: + mth = self._integrator.run + + try: + self._y, self.t = mth(self.f, self.jac or (lambda: None), + self._y, self.t, t, + self.f_params, self.jac_params) + except SystemError as e: + # f2py issue with tuple returns, see ticket 1187. + raise ValueError( + 'Function to integrate must not return a tuple.' 
+ ) from e + + return self._y + + def successful(self): + """Check if integration was successful.""" + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.success == 1 + + def get_return_code(self): + """Extracts the return code for the integration to enable better control + if the integration fails. + + In general, a return code > 0 implies success, while a return code < 0 + implies failure. + + Notes + ----- + This section describes possible return codes and their meaning, for available + integrators that can be selected by `set_integrator` method. + + "vode" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call. (Perhaps wrong MF.) + -2 Excess accuracy requested. (Tolerances too small.) + -3 Illegal input detected. (See printed message.) + -4 Repeated error test failures. (Check all input.) + -5 Repeated convergence failures. (Perhaps bad Jacobian + supplied or wrong choice of MF or tolerances.) + -6 Error weight became zero during problem. (Solution + component i vanished, and ATOL or ATOL(i) = 0.) + =========== ======= + + "zvode" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call. (Perhaps wrong MF.) + -2 Excess accuracy requested. (Tolerances too small.) + -3 Illegal input detected. (See printed message.) + -4 Repeated error test failures. (Check all input.) + -5 Repeated convergence failures. (Perhaps bad Jacobian + supplied or wrong choice of MF or tolerances.) + -6 Error weight became zero during problem. (Solution + component i vanished, and ATOL or ATOL(i) = 0.) + =========== ======= + + "dopri5" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. 
+ -4 Problem is probably stiff (interrupted). + =========== ======= + + "dop853" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. + -4 Problem is probably stiff (interrupted). + =========== ======= + + "lsoda" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call (perhaps wrong Dfun type). + -2 Excess accuracy requested (tolerances too small). + -3 Illegal input detected (internal error). + -4 Repeated error test failures (internal error). + -5 Repeated convergence failures (perhaps bad Jacobian or tolerances). + -6 Error weight became zero during problem. + -7 Internal workspace insufficient to finish (internal error). + =========== ======= + """ + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.istate + + def set_f_params(self, *args): + """Set extra parameters for user-supplied function f.""" + self.f_params = args + return self + + def set_jac_params(self, *args): + """Set extra parameters for user-supplied function jac.""" + self.jac_params = args + return self + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. 
+ + Parameters + ---------- + solout : callable + ``solout(t, y)`` is called at each internal integrator step, + t is a scalar providing the current independent position + y is the current solution ``y.shape == (n,)`` + solout should return -1 to stop integration + otherwise it should return None or 0 + + """ + if self._integrator.supports_solout: + self._integrator.set_solout(solout) + if self._y is not None: + self._integrator.reset(len(self._y), self.jac is not None) + else: + raise ValueError("selected integrator does not support solout," + " choose another one") + + +def _transform_banded_jac(bjac): + """ + Convert a real matrix of the form (for example) + + [0 0 A B] [0 0 0 B] + [0 0 C D] [0 0 A D] + [E F G H] to [0 F C H] + [I J K L] [E J G L] + [I 0 K 0] + + That is, every other column is shifted up one. + """ + # Shift every other column. + newjac = zeros((bjac.shape[0] + 1, bjac.shape[1])) + newjac[1:, ::2] = bjac[:, ::2] + newjac[:-1, 1::2] = bjac[:, 1::2] + return newjac + + +class complex_ode(ode): + """ + A wrapper of ode for complex systems. + + This functions similarly as `ode`, but re-maps a complex-valued + equation system to a real-valued one before using the integrators. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Rhs of the equation. t is a scalar, ``y.shape == (n,)``. + ``f_args`` is set by calling ``set_f_params(*args)``. + jac : callable ``jac(t, y, *jac_args)`` + Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``. + ``jac_args`` is set by calling ``set_f_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + Examples + -------- + For usage examples, see `ode`. 
+ + """ + + def __init__(self, f, jac=None): + self.cf = f + self.cjac = jac + if jac is None: + ode.__init__(self, self._wrap, None) + else: + ode.__init__(self, self._wrap, self._wrap_jac) + + def _wrap(self, t, y, *f_args): + f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args)) + # self.tmp is a real-valued array containing the interleaved + # real and imaginary parts of f. + self.tmp[::2] = real(f) + self.tmp[1::2] = imag(f) + return self.tmp + + def _wrap_jac(self, t, y, *jac_args): + # jac is the complex Jacobian computed by the user-defined function. + jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args)) + + # jac_tmp is the real version of the complex Jacobian. Each complex + # entry in jac, say 2+3j, becomes a 2x2 block of the form + # [2 -3] + # [3 2] + jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1])) + jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac) + jac_tmp[1::2, ::2] = imag(jac) + jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2] + + ml = getattr(self._integrator, 'ml', None) + mu = getattr(self._integrator, 'mu', None) + if ml is not None or mu is not None: + # Jacobian is banded. The user's Jacobian function has computed + # the complex Jacobian in packed format. The corresponding + # real-valued version has every other column shifted up. + jac_tmp = _transform_banded_jac(jac_tmp) + + return jac_tmp + + @property + def y(self): + return self._y[::2] + 1j * self._y[1::2] + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator + **integrator_params + Additional parameters for the integrator. + """ + if name == 'zvode': + raise ValueError("zvode must be used with ode, not complex_ode") + + lband = integrator_params.get('lband') + uband = integrator_params.get('uband') + if lband is not None or uband is not None: + # The Jacobian is banded. 
Override the user-supplied bandwidths + # (which are for the complex Jacobian) with the bandwidths of + # the corresponding real-valued Jacobian wrapper of the complex + # Jacobian. + integrator_params['lband'] = 2 * (lband or 0) + 1 + integrator_params['uband'] = 2 * (uband or 0) + 1 + + return ode.set_integrator(self, name, **integrator_params) + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + y = asarray(y) + self.tmp = zeros(y.size * 2, 'float') + self.tmp[::2] = real(y) + self.tmp[1::2] = imag(y) + return ode.set_initial_value(self, self.tmp, t) + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. + + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + y = ode.integrate(self, t, step, relax) + return y[::2] + 1j * y[1::2] + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. 
+ + Parameters + ---------- + solout : callable + ``solout(t, y)`` is called at each internal integrator step, + t is a scalar providing the current independent position + y is the current solution ``y.shape == (n,)`` + solout should return -1 to stop integration + otherwise it should return None or 0 + + """ + if self._integrator.supports_solout: + self._integrator.set_solout(solout, complex=True) + else: + raise TypeError("selected integrator does not support solouta, " + "choose another one") + + +# ------------------------------------------------------------------------------ +# ODE integrators +# ------------------------------------------------------------------------------ + +def find_integrator(name): + for cl in IntegratorBase.integrator_classes: + if re.match(name, cl.__name__, re.I): + return cl + return None + + +class IntegratorConcurrencyError(RuntimeError): + """ + Failure due to concurrent usage of an integrator that can be used + only for a single problem at a time. + + """ + + def __init__(self, name): + msg = (f"Integrator `{name}` can be used to solve only a single problem " + "at a time. If you want to integrate multiple problems, " + "consider using a different integrator (see `ode.set_integrator`)") + RuntimeError.__init__(self, msg) + + +class IntegratorBase: + runner = None # runner is None => integrator is not available + success = None # success==1 if integrator was called successfully + istate = None # istate > 0 means success, istate < 0 means failure + supports_run_relax = None + supports_step = None + supports_solout = False + integrator_classes = [] + scalar = float + + def acquire_new_handle(self): + # Some of the integrators have internal state (ancient + # Fortran...), and so only one instance can use them at a time. + # We keep track of this, and fail when concurrent usage is tried. 
+ self.__class__.active_global_handle += 1 + self.handle = self.__class__.active_global_handle + + def check_handle(self): + if self.handle is not self.__class__.active_global_handle: + raise IntegratorConcurrencyError(self.__class__.__name__) + + def reset(self, n, has_jac): + """Prepare integrator for call: allocate memory, set flags, etc. + n - number of equations. + has_jac - if user has supplied function for evaluating Jacobian. + """ + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + """Integrate from t=t0 to t=t1 using y0 as an initial condition. + Return 2-tuple (y1,t1) where y1 is the result and t=t1 + defines the stoppage coordinate of the result. + """ + raise NotImplementedError('all integrators must define ' + 'run(f, jac, t0, t1, y0, f_params, jac_params)') + + def step(self, f, jac, y0, t0, t1, f_params, jac_params): + """Make one integration step and return (y1,t1).""" + raise NotImplementedError(f'{self.__class__.__name__} ' + 'does not support step() method') + + def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params): + """Integrate from t=t0 to t>=t1 and return (y1,t).""" + raise NotImplementedError(f'{self.__class__.__name__} ' + 'does not support run_relax() method') + + # XXX: __str__ method for getting visual state of the integrator + + +def _vode_banded_jac_wrapper(jacfunc, ml, jac_params): + """ + Wrap a banded Jacobian function with a function that pads + the Jacobian with `ml` rows of zeros. + """ + + def jac_wrapper(t, y): + jac = asarray(jacfunc(t, y, *jac_params)) + padded_jac = vstack((jac, zeros((ml, jac.shape[1])))) + return padded_jac + + return jac_wrapper + + +class vode(IntegratorBase): + runner = getattr(_vode, 'dvode', None) + + messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)', + -2: 'Excess accuracy requested. (Tolerances too small.)', + -3: 'Illegal input detected. (See printed message.)', + -4: 'Repeated error test failures. (Check all input.)', + -5: 'Repeated convergence failures. 
(Perhaps bad' + ' Jacobian supplied or wrong choice of MF or tolerances.)', + -6: 'Error weight became zero during problem. (Solution' + ' component i vanished, and ATOL or ATOL(i) = 0.)' + } + supports_run_relax = 1 + supports_step = 1 + active_global_handle = 0 + + def __init__(self, + method='adams', + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + order=12, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ): + + if re.match(method, r'adams', re.I): + self.meth = 1 + elif re.match(method, r'bdf', re.I): + self.meth = 2 + else: + raise ValueError(f'Unknown integration method {method}') + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.order = order + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.success = 1 + + self.initialized = False + + def _determine_mf_and_set_bands(self, has_jac): + """ + Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`. + + In the Fortran code, the legal values of `MF` are: + 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, + -11, -12, -14, -15, -21, -22, -24, -25 + but this Python wrapper does not use negative values. + + Returns + + mf = 10*self.meth + miter + + self.meth is the linear multistep method: + self.meth == 1: method="adams" + self.meth == 2: method="bdf" + + miter is the correction iteration method: + miter == 0: Functional iteration; no Jacobian involved. + miter == 1: Chord iteration with user-supplied full Jacobian. + miter == 2: Chord iteration with internally computed full Jacobian. + miter == 3: Chord iteration with internally computed diagonal Jacobian. + miter == 4: Chord iteration with user-supplied banded Jacobian. + miter == 5: Chord iteration with internally computed banded Jacobian. 
+ + Side effects: If either self.mu or self.ml is not None and the other is None, + then the one that is None is set to 0. + """ + + jac_is_banded = self.mu is not None or self.ml is not None + if jac_is_banded: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + + # has_jac is True if the user provided a Jacobian function. + if has_jac: + if jac_is_banded: + miter = 4 + else: + miter = 1 + else: + if jac_is_banded: + if self.ml == self.mu == 0: + miter = 3 # Chord iteration with internal diagonal Jacobian. + else: + miter = 5 # Chord iteration with internal banded Jacobian. + else: + # self.with_jacobian is set by the user in + # the call to ode.set_integrator. + if self.with_jacobian: + miter = 2 # Chord iteration with internal full Jacobian. + else: + miter = 0 # Functional iteration; no Jacobian involved. + + mf = 10 * self.meth + miter + return mf + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf == 10: + lrw = 20 + 16 * n + elif mf in [11, 12]: + lrw = 22 + 16 * n + 2 * n * n + elif mf == 13: + lrw = 22 + 17 * n + elif mf in [14, 15]: + lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n + elif mf == 20: + lrw = 20 + 9 * n + elif mf in [21, 22]: + lrw = 22 + 9 * n + 2 * n * n + elif mf == 23: + lrw = 22 + 10 * n + elif mf in [24, 25]: + lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n + else: + raise ValueError(f'Unexpected mf={mf}') + + if mf % 10 in [0, 3]: + liw = 30 + else: + liw = 30 + n + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), _vode_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + def 
run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + + if self.ml is not None and self.ml > 0: + # Banded Jacobian. Wrap the user-provided function with one + # that pads the Jacobian array with the extra `self.ml` rows + # required by the f2py-generated wrapper. + jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params) + + args = ((f, jac, y0, t0, t1) + tuple(self.call_args) + + (f_params, jac_params)) + + with VODE_LOCK: + y1, t, istate = self.runner(*args) + + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn(f'{self.__class__.__name__:s}: ' + f'{self.messages.get(istate, unexpected_istate_msg):s}', + stacklevel=2) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if vode.runner is not None: + IntegratorBase.integrator_classes.append(vode) + + +class zvode(vode): + runner = getattr(_vode, 'zvode', None) + + supports_run_relax = 1 + supports_step = 1 + scalar = complex + active_global_handle = 0 + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf in (10,): + lzw = 15 * n + elif mf in (11, 12): + lzw = 15 * n + 2 * n ** 2 + elif mf in (-11, -12): + lzw = 15 * n + n ** 2 + elif mf in (13,): + lzw = 16 * n + elif mf in (14, 15): + lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-14, -15): + lzw = 16 * n + (2 * self.ml + self.mu) * n + elif mf in (20,): + lzw = 8 * n + elif mf in (21, 22): + lzw = 8 * n + 2 * n ** 2 + elif mf in (-21, -22): + lzw = 8 * n + n ** 2 + elif mf in (23,): + lzw = 9 * n + 
elif mf in (24, 25): + lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-24, -25): + lzw = 9 * n + (2 * self.ml + self.mu) * n + + lrw = 20 + n + + if mf % 10 in (0, 3): + liw = 30 + else: + liw = 30 + n + + zwork = zeros((lzw,), complex) + self.zwork = zwork + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), _vode_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.zwork, self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + +if zvode.runner is not None: + IntegratorBase.integrator_classes.append(zvode) + + +class dopri5(IntegratorBase): + runner = getattr(_dop, 'dopri5', None) + name = 'dopri5' + supports_solout = True + + messages = {1: 'computation successful', + 2: 'computation successful (interrupted by solout)', + -1: 'input is not consistent', + -2: 'larger nsteps is needed', + -3: 'step size becomes too small', + -4: 'problem is probably stiff (interrupted)', + } + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=10.0, + dfactor=0.2, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + self.rtol = rtol + self.atol = atol + self.nsteps = nsteps + self.max_step = max_step + self.first_step = first_step + self.safety = safety + self.ifactor = ifactor + self.dfactor = dfactor + self.beta = beta + self.verbosity = verbosity + self.success = 1 + self.set_solout(None) + + def set_solout(self, solout, complex=False): + self.solout = solout + self.solout_cmplx = complex + if solout is None: + self.iout = 0 + else: + self.iout = 1 + + def reset(self, n, has_jac): + work = zeros((8 * n + 21,), float) + 
work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), _dop_int_dtype) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + x, y, iwork, istate = self.runner(*((f, t0, y0, t1) + + tuple(self.call_args) + (f_params,))) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn(f'{self.__class__.__name__:s}: ' + f'{self.messages.get(istate, unexpected_istate_msg):s}', + stacklevel=2) + self.success = 0 + return y, x + + def _solout(self, nr, xold, x, y, nd, icomp, con): + if self.solout is not None: + if self.solout_cmplx: + y = y[::2] + 1j * y[1::2] + return self.solout(x, y) + else: + return 1 + + +if dopri5.runner is not None: + IntegratorBase.integrator_classes.append(dopri5) + + +class dop853(dopri5): + runner = getattr(_dop, 'dop853', None) + name = 'dop853' + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=6.0, + dfactor=0.3, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + super().__init__(rtol, atol, nsteps, max_step, first_step, safety, + ifactor, dfactor, beta, method, verbosity) + + def reset(self, n, has_jac): + work = zeros((11 * n + 21,), float) + work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), _dop_int_dtype) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + +if dop853.runner is 
not None: + IntegratorBase.integrator_classes.append(dop853) + + +class lsoda(IntegratorBase): + runner = getattr(_lsoda, 'lsoda', None) + active_global_handle = 0 + + messages = { + 2: "Integration successful.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error)." + } + + def __init__(self, + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ixpr=0, + max_hnil=0, + max_order_ns=12, + max_order_s=5, + method=None + ): + + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.max_order_ns = max_order_ns + self.max_order_s = max_order_s + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.ixpr = ixpr + self.max_hnil = max_hnil + self.success = 1 + + self.initialized = False + + def reset(self, n, has_jac): + # Calculate parameters for Fortran subroutine dvode. 
+ if has_jac: + if self.mu is None and self.ml is None: + jt = 1 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 4 + else: + if self.mu is None and self.ml is None: + jt = 2 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 5 + lrn = 20 + (self.max_order_ns + 4) * n + if jt in [1, 2]: + lrs = 22 + (self.max_order_s + 4) * n + n * n + elif jt in [4, 5]: + lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n + else: + raise ValueError(f'Unexpected jt={jt}') + lrw = max(lrn, lrs) + liw = 20 + n + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + iwork = zeros((liw,), _lsoda_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.ixpr + iwork[5] = self.nsteps + iwork[6] = self.max_hnil + iwork[7] = self.max_order_ns + iwork[8] = self.max_order_s + self.iwork = iwork + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, jt] + self.success = 1 + self.initialized = False + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + args = [f, y0, t0, t1] + self.call_args[:-1] + \ + [jac, self.call_args[-1], f_params, 0, jac_params] + + with LSODA_LOCK: + y1, t, istate = self.runner(*args) + + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn(f'{self.__class__.__name__:s}: ' + f'{self.messages.get(istate, unexpected_istate_msg):s}', + stacklevel=2) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = 
self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if lsoda.runner: + IntegratorBase.integrator_classes.append(lsoda) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3d354b1f633335682a26424d8e36a5ade7f2230c --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:405e393b7e8bb42de902c77d9affa7e88163141aeea18532f20bd77efc61076d +size 479121 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..75dfe925b312ae609d19ccbec27927c6c945176f --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_odepack_py.py @@ -0,0 +1,273 @@ +# Author: Travis Oliphant + +__all__ = ['odeint', 'ODEintWarning'] + +import numpy as np +from . 
import _odepack +from copy import copy +import warnings + +from threading import Lock + + +ODE_LOCK = Lock() + + +class ODEintWarning(Warning): + """Warning raised during the execution of `odeint`.""" + pass + + +_msgs = {2: "Integration successful.", + 1: "Nothing was done; the integration time was 0.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error).", + -8: "Run terminated (internal error)." + } + + +def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, + ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, + hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, + mxords=5, printmessg=0, tfirst=False): + """ + Integrate a system of ordinary differential equations. + + .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a + differential equation. + + Solve a system of ordinary differential equations using lsoda from the + FORTRAN library odepack. + + Solves the initial value problem for stiff or non-stiff systems + of first order ode-s:: + + dy/dt = func(y, t, ...) [or func(t, y, ...)] + + where y can be a vector. + + .. note:: By default, the required order of the first two arguments of + `func` are in the opposite order of the arguments in the system + definition function used by the `scipy.integrate.ode` class and + the function `scipy.integrate.solve_ivp`. To use a function with + the signature ``func(t, y, ...)``, the argument `tfirst` must be + set to ``True``. + + Parameters + ---------- + func : callable(y, t, ...) or callable(t, y, ...) + Computes the derivative of y at t. 
+ If the signature is ``callable(t, y, ...)``, then the argument + `tfirst` must be set ``True``. + `func` must not modify the data in `y`, as it is a + view of the data used internally by the ODE solver. + y0 : array + Initial condition on y (can be a vector). + t : array + A sequence of time points for which to solve for y. The initial + value point should be the first element of this sequence. + This sequence must be monotonically increasing or monotonically + decreasing; repeated values are allowed. + args : tuple, optional + Extra arguments to pass to function. + Dfun : callable(y, t, ...) or callable(t, y, ...) + Gradient (Jacobian) of `func`. + If the signature is ``callable(t, y, ...)``, then the argument + `tfirst` must be set ``True``. + `Dfun` must not modify the data in `y`, as it is a + view of the data used internally by the ODE solver. + col_deriv : bool, optional + True if `Dfun` defines derivatives down columns (faster), + otherwise `Dfun` should define derivatives across rows. + full_output : bool, optional + True if to return a dictionary of optional outputs as the second output + printmessg : bool, optional + Whether to print the convergence message + tfirst : bool, optional + If True, the first two arguments of `func` (and `Dfun`, if given) + must ``t, y`` instead of the default ``y, t``. + + .. versionadded:: 1.1.0 + + Returns + ------- + y : array, shape (len(t), len(y0)) + Array containing the value of y for each desired time in t, + with the initial value `y0` in the first row. 
+ infodict : dict, only returned if full_output == True + Dictionary containing additional output information + + ======= ============================================================ + key meaning + ======= ============================================================ + 'hu' vector of step sizes successfully used for each time step + 'tcur' vector with the value of t reached for each time step + (will always be at least as large as the input times) + 'tolsf' vector of tolerance scale factors, greater than 1.0, + computed when a request for too much accuracy was detected + 'tsw' value of t at the time of the last method switch + (given for each time step) + 'nst' cumulative number of time steps + 'nfe' cumulative number of function evaluations for each time step + 'nje' cumulative number of jacobian evaluations for each time step + 'nqu' a vector of method orders for each successful step + 'imxer' index of the component of largest magnitude in the + weighted local error vector (e / ewt) on an error return, -1 + otherwise + 'lenrw' the length of the double work array required + 'leniw' the length of integer work array required + 'mused' a vector of method indicators for each successful time step: + 1: adams (nonstiff), 2: bdf (stiff) + ======= ============================================================ + + Other Parameters + ---------------- + ml, mu : int, optional + If either of these are not None or non-negative, then the + Jacobian is assumed to be banded. These give the number of + lower and upper non-zero diagonals in this banded matrix. + For the banded case, `Dfun` should return a matrix whose + rows contain the non-zero bands (starting with the lowest diagonal). + Thus, the return matrix `jac` from `Dfun` should have shape + ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``. + The data in `jac` must be stored such that ``jac[i - j + mu, j]`` + holds the derivative of the ``i``\\ th equation with respect to the + ``j``\\ th state variable. 
If `col_deriv` is True, the transpose of + this `jac` must be returned. + rtol, atol : float, optional + The input parameters `rtol` and `atol` determine the error + control performed by the solver. The solver will control the + vector, e, of estimated local errors in y, according to an + inequality of the form ``max-norm of (e / ewt) <= 1``, + where ewt is a vector of positive error weights computed as + ``ewt = rtol * abs(y) + atol``. + rtol and atol can be either vectors the same length as y or scalars. + Defaults to 1.49012e-8. + tcrit : ndarray, optional + Vector of critical points (e.g., singularities) where integration + care should be taken. + h0 : float, (0: solver-determined), optional + The step size to be attempted on the first step. + hmax : float, (0: solver-determined), optional + The maximum absolute step size allowed. + hmin : float, (0: solver-determined), optional + The minimum absolute step size allowed. + ixpr : bool, optional + Whether to generate extra printing at method switches. + mxstep : int, (0: solver-determined), optional + Maximum number of (internally defined) steps allowed for each + integration point in t. + mxhnil : int, (0: solver-determined), optional + Maximum number of messages printed. + mxordn : int, (0: solver-determined), optional + Maximum order to be allowed for the non-stiff (Adams) method. + mxords : int, (0: solver-determined), optional + Maximum order to be allowed for the stiff (BDF) method. + + See Also + -------- + solve_ivp : solve an initial value problem for a system of ODEs + ode : a more object-oriented integrator based on VODE + quad : for finding the area under a curve + + Examples + -------- + The second order differential equation for the angle `theta` of a + pendulum acted on by gravity with friction can be written:: + + theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0 + + where `b` and `c` are positive constants, and a prime (') denotes a + derivative. 
To solve this equation with `odeint`, we must first convert + it to a system of first order equations. By defining the angular + velocity ``omega(t) = theta'(t)``, we obtain the system:: + + theta'(t) = omega(t) + omega'(t) = -b*omega(t) - c*sin(theta(t)) + + Let `y` be the vector [`theta`, `omega`]. We implement this system + in Python as: + + >>> import numpy as np + >>> def pend(y, t, b, c): + ... theta, omega = y + ... dydt = [omega, -b*omega - c*np.sin(theta)] + ... return dydt + ... + + We assume the constants are `b` = 0.25 and `c` = 5.0: + + >>> b = 0.25 + >>> c = 5.0 + + For initial conditions, we assume the pendulum is nearly vertical + with `theta(0)` = `pi` - 0.1, and is initially at rest, so + `omega(0)` = 0. Then the vector of initial conditions is + + >>> y0 = [np.pi - 0.1, 0.0] + + We will generate a solution at 101 evenly spaced samples in the interval + 0 <= `t` <= 10. So our array of times is: + + >>> t = np.linspace(0, 10, 101) + + Call `odeint` to generate the solution. To pass the parameters + `b` and `c` to `pend`, we give them to `odeint` using the `args` + argument. + + >>> from scipy.integrate import odeint + >>> sol = odeint(pend, y0, t, args=(b, c)) + + The solution is an array with shape (101, 2). The first column + is `theta(t)`, and the second is `omega(t)`. The following code + plots both components. 
+ + >>> import matplotlib.pyplot as plt + >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)') + >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)') + >>> plt.legend(loc='best') + >>> plt.xlabel('t') + >>> plt.grid() + >>> plt.show() + """ + + if ml is None: + ml = -1 # changed to zero inside function call + if mu is None: + mu = -1 # changed to zero inside function call + + dt = np.diff(t) + if not ((dt >= 0).all() or (dt <= 0).all()): + raise ValueError("The values in t must be monotonically increasing " + "or monotonically decreasing; repeated values are " + "allowed.") + + t = copy(t) + y0 = copy(y0) + + with ODE_LOCK: + output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu, + full_output, rtol, atol, tcrit, h0, hmax, hmin, + ixpr, mxstep, mxhnil, mxordn, mxords, + int(bool(tfirst))) + if output[-1] < 0: + warning_msg = (f"{_msgs[output[-1]]} Run with full_output = 1 to " + f"get quantitative information.") + warnings.warn(warning_msg, ODEintWarning, stacklevel=2) + elif printmessg: + warning_msg = _msgs[output[-1]] + warnings.warn(warning_msg, ODEintWarning, stacklevel=2) + + if full_output: + output[1]['message'] = _msgs[output[-1]] + + output = output[:-1] + if len(output) == 1: + return output[0] + else: + return output diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py new file mode 100644 index 0000000000000000000000000000000000000000..758bac5138777dbe152c2b455b5160196d2282ca --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quad_vec.py @@ -0,0 +1,682 @@ +import sys +import copy +import heapq +import collections +import functools +import warnings + +import numpy as np + +from scipy._lib._util import MapWrapper, _FunctionWrapper + + +class LRUDict(collections.OrderedDict): + def __init__(self, max_size): + self.__max_size = max_size + + def __setitem__(self, key, value): + existing_key = (key in self) + 
super().__setitem__(key, value)
+        if existing_key:
+            # Re-inserted key: refresh its recency rather than evicting.
+            self.move_to_end(key)
+        elif len(self) > self.__max_size:
+            # Over capacity: drop the least recently inserted entry
+            # (popitem(last=False) pops the oldest OrderedDict item).
+            self.popitem(last=False)
+
+    def update(self, other):
+        # Not needed below
+        raise NotImplementedError()
+
+
+class SemiInfiniteFunc:
+    """
+    Argument transform from (start, +-oo) to (0, 1)
+    """
+    def __init__(self, func, start, infty):
+        self._func = func
+        self._start = start
+        # Sign of the infinite endpoint: +1 for (start, +oo), -1 for (start, -oo).
+        self._sgn = -1 if infty < 0 else 1
+
+        # Overflow threshold for the 1/t**2 factor
+        self._tmin = sys.float_info.min**0.5
+
+    def get_t(self, x):
+        # Inverse transform: t corresponding to abscissa x.  Used by
+        # quad_vec to map user break points into the transformed interval.
+        z = self._sgn * (x - self._start) + 1
+        if z == 0:
+            # Can happen only if point not in range
+            return np.inf
+        return 1 / z
+
+    def __call__(self, t):
+        if t < self._tmin:
+            # Transformed integrand would overflow; the tail contribution
+            # is treated as zero.
+            return 0.0
+        else:
+            # x = start + sgn*(1 - t)/t; the (f / t) / t factor is the
+            # Jacobian 1/t**2 of the change of variables.
+            x = self._start + self._sgn * (1 - t) / t
+            f = self._func(x)
+            return self._sgn * (f / t) / t
+
+
+class DoubleInfiniteFunc:
+    """
+    Argument transform from (-oo, oo) to (-1, 1)
+    """
+    def __init__(self, func):
+        self._func = func
+
+        # Overflow threshold for the 1/t**2 factor
+        self._tmin = sys.float_info.min**0.5
+
+    def get_t(self, x):
+        # Inverse transform: t corresponding to abscissa x.
+        s = -1 if x < 0 else 1
+        return s / (abs(x) + 1)
+
+    def __call__(self, t):
+        if abs(t) < self._tmin:
+            # Transformed integrand would overflow near t == 0.
+            return 0.0
+        else:
+            # x = (1 - |t|)/t; (f / t) / t applies the 1/t**2 Jacobian.
+            x = (1 - abs(t)) / t
+            f = self._func(x)
+            return (f / t) / t
+
+
+def _max_norm(x):
+    # Max-norm used when norm='max'.
+    return np.amax(abs(x))
+
+
+def _get_sizeof(obj):
+    try:
+        return sys.getsizeof(obj)
+    except TypeError:
+        # occurs on pypy
+        if hasattr(obj, '__sizeof__'):
+            return int(obj.__sizeof__())
+        # Fallback guess when the object reports no size at all.
+        return 64
+
+
+class _Bunch:
+    # Minimal attribute container; used for the `info` object returned by
+    # quad_vec when full_output=True.
+    def __init__(self, **kwargs):
+        self.__keys = kwargs.keys()
+        self.__dict__.update(**kwargs)
+
+    def __repr__(self):
+        key_value_pairs = ', '.join(
+            f'{k}={repr(self.__dict__[k])}' for k in self.__keys
+        )
+        return f"_Bunch({key_value_pairs})"
+
+
+def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6,
+             limit=10000, workers=1, points=None, quadrature=None, full_output=False,
+             *, args=()):
+    r"""Adaptive integration of a vector-valued function.
+
+    Parameters
+    ----------
+    f : callable
+        Vector-valued function f(x) to integrate.
+    a : float
+        Initial point.
+    b : float
+        Final point.
+    epsabs : float, optional
+        Absolute tolerance.
+    epsrel : float, optional
+        Relative tolerance.
+    norm : {'max', '2'}, optional
+        Vector norm to use for error estimation.
+    cache_size : int, optional
+        Number of bytes to use for memoization.
+    limit : float or int, optional
+        An upper bound on the number of subintervals used in the adaptive
+        algorithm.
+    workers : int or map-like callable, optional
+        If `workers` is an integer, part of the computation is done in
+        parallel, subdivided into this many tasks (using
+        :class:`python:multiprocessing.pool.Pool`).
+        Supply `-1` to use all cores available to the process.
+        Alternatively, supply a map-like callable, such as
+        :meth:`python:multiprocessing.pool.Pool.map` for evaluating the
+        population in parallel.
+        This evaluation is carried out as ``workers(func, iterable)``.
+    points : list, optional
+        List of additional breakpoints.
+    quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
+        Quadrature rule to use on subintervals.
+        Options: 'gk21' (Gauss-Kronrod 21-point rule),
+        'gk15' (Gauss-Kronrod 15-point rule),
+        'trapezoid' (composite trapezoid rule).
+        Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite
+        intervals.
+    full_output : bool, optional
+        Return an additional ``info`` dictionary.
+    args : tuple, optional
+        Extra arguments to pass to function, if any.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    res : {float, array-like}
+        Estimate for the result.
+    err : float
+        Error estimate for the result in the given norm.
+    info : dict
+        Returned only when ``full_output=True``.
+        Info dictionary. Is an object with the attributes:
+
+            success : bool
+                Whether integration reached target precision.
+            status : int
+                Indicator for convergence, success (0),
+                failure (1), and failure due to rounding error (2).
+            neval : int
+                Number of function evaluations.
+            intervals : ndarray, shape (num_intervals, 2)
+                Start and end points of subdivision intervals.
+            integrals : ndarray, shape (num_intervals, ...)
+                Integral for each interval.
+                Note that at most ``cache_size`` values are recorded,
+                and the array may contain *nan* for missing items.
+            errors : ndarray, shape (num_intervals,)
+                Estimated integration error for each interval.
+
+    Notes
+    -----
+    The algorithm mainly follows the implementation of QUADPACK's
+    DQAG* algorithms, implementing global error control and adaptive
+    subdivision.
+
+    The algorithm here has some differences from the QUADPACK approach:
+
+    Instead of subdividing one interval at a time, the algorithm
+    subdivides N intervals with largest errors at once. This enables
+    (partial) parallelization of the integration.
+
+    The logic of subdividing "next largest" intervals first is then
+    not implemented, and we rely on the above extension to avoid
+    concentrating on "small" intervals only.
+
+    The Wynn epsilon table extrapolation is not used (QUADPACK uses it
+    for infinite intervals). This is because the algorithm here is
+    supposed to work on vector-valued functions, in a user-specified
+    norm, and the extension of the epsilon algorithm to this case does
+    not appear to be widely agreed. For max-norm, using elementwise
+    Wynn epsilon could be possible, but we do not do this here with
+    the hope that the epsilon extrapolation is mainly useful in
+    special cases.
+
+    References
+    ----------
+    [1] R. Piessens, E. de Doncker, QUADPACK (1983).
+
+    Examples
+    --------
+    We can compute integrations of a vector-valued function:
+
+    >>> from scipy.integrate import quad_vec
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> alpha = np.linspace(0.0, 2.0, num=30)
+    >>> f = lambda x: x**alpha
+    >>> x0, x1 = 0, 2
+    >>> y, err = quad_vec(f, x0, x1)
+    >>> plt.plot(alpha, y)
+    >>> plt.xlabel(r"$\alpha$")
+    >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
+    >>> plt.show()
+
+    When using the argument `workers`, one should ensure
+    that the main module is import-safe, for instance
+    by rewriting the example above as:
+
+    .. code-block:: python
+
+        from scipy.integrate import quad_vec
+        import numpy as np
+        import matplotlib.pyplot as plt
+
+        alpha = np.linspace(0.0, 2.0, num=30)
+        x0, x1 = 0, 2
+        def f(x):
+            return x**alpha
+
+        if __name__ == "__main__":
+            y, err = quad_vec(f, x0, x1, workers=2)
+    """
+    a = float(a)
+    b = float(b)
+
+    if args:
+        if not isinstance(args, tuple):
+            args = (args,)
+
+        # create a wrapped function to allow the use of map and Pool.map
+        f = _FunctionWrapper(f, args)
+
+    # Use simple transformations to deal with integrals over infinite
+    # intervals.
+    kwargs = dict(epsabs=epsabs,
+                  epsrel=epsrel,
+                  norm=norm,
+                  cache_size=cache_size,
+                  limit=limit,
+                  workers=workers,
+                  points=points,
+                  quadrature='gk15' if quadrature is None else quadrature,
+                  full_output=full_output)
+    if np.isfinite(a) and np.isinf(b):
+        # (a, +/-oo): map onto (0, 1) and recurse on the transformed
+        # integrand; break points are mapped with the inverse transform.
+        f2 = SemiInfiniteFunc(f, start=a, infty=b)
+        if points is not None:
+            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
+        return quad_vec(f2, 0, 1, **kwargs)
+    elif np.isfinite(b) and np.isinf(a):
+        # (-/+oo, b): integrate over (b, +/-oo) and negate the result.
+        f2 = SemiInfiniteFunc(f, start=b, infty=a)
+        if points is not None:
+            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
+        res = quad_vec(f2, 0, 1, **kwargs)
+        return (-res[0],) + res[1:]
+    elif np.isinf(a) and np.isinf(b):
+        sgn = -1 if b < a else 1
+
+        # NB. 
explicitly split integral at t=0, which separates
+        # the positive and negative sides
+        f2 = DoubleInfiniteFunc(f)
+        if points is not None:
+            kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
+        else:
+            kwargs['points'] = (0,)
+
+        if a != b:
+            res = quad_vec(f2, -1, 1, **kwargs)
+        else:
+            # Both endpoints are the same infinity: zero-width integral.
+            res = quad_vec(f2, 1, 1, **kwargs)
+
+        return (res[0]*sgn,) + res[1:]
+    elif not (np.isfinite(a) and np.isfinite(b)):
+        raise ValueError(f"invalid integration bounds a={a}, b={b}")
+
+    norm_funcs = {
+        None: _max_norm,
+        'max': _max_norm,
+        '2': np.linalg.norm
+    }
+    if callable(norm):
+        norm_func = norm
+    else:
+        norm_func = norm_funcs[norm]
+
+    # Max number of intervals subdivided in one parallel batch.
+    parallel_count = 128
+    # Require at least this many intervals before declaring convergence.
+    min_intervals = 2
+
+    try:
+        _quadrature = {None: _quadrature_gk21,
+                       'gk21': _quadrature_gk21,
+                       'gk15': _quadrature_gk15,
+                       'trapz': _quadrature_trapezoid,  # alias for backcompat
+                       'trapezoid': _quadrature_trapezoid}[quadrature]
+    except KeyError as e:
+        raise ValueError(f"unknown quadrature {quadrature!r}") from e
+
+    if quadrature == "trapz":
+        msg = ("`quadrature='trapz'` is deprecated in favour of "
+               "`quadrature='trapezoid' and will raise an error from SciPy 1.16.0 "
+               "onwards.")
+        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+
+    # Initial interval set: break (a, b) at the user-supplied points,
+    # skipping points outside (a, b) and duplicates.
+    if points is None:
+        initial_intervals = [(a, b)]
+    else:
+        prev = a
+        initial_intervals = []
+        for p in sorted(points):
+            p = float(p)
+            if not (a < p < b) or p == prev:
+                continue
+            initial_intervals.append((prev, p))
+            prev = p
+        initial_intervals.append((prev, b))
+
+    global_integral = None
+    global_error = None
+    rounding_error = None
+    interval_cache = None
+    intervals = []
+    neval = 0
+
+    for x1, x2 in initial_intervals:
+        ig, err, rnd = _quadrature(x1, x2, f, norm_func)
+        neval += _quadrature.num_eval
+
+        if global_integral is None:
+            if isinstance(ig, (float, complex)):
+                # Specialize for scalars
+                if norm_func in (_max_norm, np.linalg.norm):
+                    norm_func = abs
+
+            global_integral = ig
+            global_error = float(err)
+            rounding_error 
= float(rnd)
+
+            # Bound the per-interval result cache by total memory
+            # (cache_size bytes / size of one result).
+            cache_count = cache_size // _get_sizeof(ig)
+            interval_cache = LRUDict(cache_count)
+        else:
+            global_integral += ig
+            global_error += err
+            rounding_error += rnd
+
+        interval_cache[(x1, x2)] = copy.copy(ig)
+        # Heap entries are keyed on -err so the largest error pops first.
+        intervals.append((-err, x1, x2))
+
+    heapq.heapify(intervals)
+
+    # Termination status codes, reported via the info dict.
+    CONVERGED = 0
+    NOT_CONVERGED = 1
+    ROUNDING_ERROR = 2
+    NOT_A_NUMBER = 3
+
+    status_msg = {
+        CONVERGED: "Target precision reached.",
+        NOT_CONVERGED: "Target precision not reached.",
+        ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
+        NOT_A_NUMBER: "Non-finite values encountered."
+    }
+
+    # Process intervals
+    with MapWrapper(workers) as mapwrapper:
+        ier = NOT_CONVERGED
+
+        while intervals and len(intervals) < limit:
+            # Select intervals with largest errors for subdivision
+            tol = max(epsabs, epsrel*norm_func(global_integral))
+
+            to_process = []
+            err_sum = 0
+
+            for j in range(parallel_count):
+                if not intervals:
+                    break
+
+                if j > 0 and err_sum > global_error - tol/8:
+                    # avoid unnecessary parallel splitting
+                    break
+
+                interval = heapq.heappop(intervals)
+
+                neg_old_err, a, b = interval
+                # Pop the cached integral for this interval (may be None
+                # if it was evicted from the LRU cache).
+                old_int = interval_cache.pop((a, b), None)
+                to_process.append(
+                    ((-neg_old_err, a, b, old_int), f, norm_func, _quadrature)
+                )
+                err_sum += -neg_old_err
+
+            # Subdivide intervals
+            for parts in mapwrapper(_subdivide_interval, to_process):
+                dint, derr, dround_err, subint, dneval = parts
+                neval += dneval
+                global_integral += dint
+                global_error += derr
+                rounding_error += dround_err
+                for x in subint:
+                    x1, x2, ig, err = x
+                    interval_cache[(x1, x2)] = ig
+                    heapq.heappush(intervals, (-err, x1, x2))
+
+            # Termination check
+            if len(intervals) >= min_intervals:
+                tol = max(epsabs, epsrel*norm_func(global_integral))
+                if global_error < tol/8:
+                    ier = CONVERGED
+                    break
+                if global_error < rounding_error:
+                    ier = ROUNDING_ERROR
+                    break
+
+            if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
+                ier = NOT_A_NUMBER
+                break
+
+    res = global_integral
+    err = global_error + rounding_error
+
+    if full_output:
+        res_arr = np.asarray(res)
+        # Placeholder for intervals whose integral was evicted from the
+        # LRU cache; those appear as NaN rows in `integrals`.
+        dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
+        integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
+                              for z in intervals], dtype=res_arr.dtype)
+        # Heap entries are (-error, x1, x2); negate to recover the error.
+        errors = np.array([-z[0] for z in intervals])
+        intervals = np.array([[z[1], z[2]] for z in intervals])
+
+        info = _Bunch(neval=neval,
+                      success=(ier == CONVERGED),
+                      status=ier,
+                      message=status_msg[ier],
+                      intervals=intervals,
+                      integrals=integrals,
+                      errors=errors)
+        return (res, err, info)
+    else:
+        return (res, err)
+
+
+def _subdivide_interval(args):
+    """Bisect one interval and return the resulting deltas.
+
+    Returns ``(dint, derr, dround_err, subintervals, dneval)``, where the
+    first three are increments to add to the global integral, error and
+    rounding-error accumulators.
+    """
+    interval, f, norm_func, _quadrature = args
+    old_err, a, b, old_int = interval
+
+    c = 0.5 * (a + b)
+
+    # Left-hand side
+    if getattr(_quadrature, 'cache_size', 0) > 0:
+        # Memoize f so evaluations shared between the halves and the
+        # parent interval are not recomputed.
+        f = functools.lru_cache(_quadrature.cache_size)(f)
+
+    s1, err1, round1 = _quadrature(a, c, f, norm_func)
+    dneval = _quadrature.num_eval
+    s2, err2, round2 = _quadrature(c, b, f, norm_func)
+    dneval += _quadrature.num_eval
+    if old_int is None:
+        # Parent integral was evicted from the cache; recompute it.
+        old_int, _, _ = _quadrature(a, b, f, norm_func)
+        dneval += _quadrature.num_eval
+
+    if getattr(_quadrature, 'cache_size', 0) > 0:
+        # With memoization, count only the actual function evaluations.
+        dneval = f.cache_info().misses
+
+    dint = s1 + s2 - old_int
+    derr = err1 + err2 - old_err
+    dround_err = round1 + round2
+
+    subintervals = ((a, c, s1, err1), (c, b, s2, err2))
+    return dint, derr, dround_err, subintervals, dneval
+
+
+def _quadrature_trapezoid(x1, x2, f, norm_func):
+    """
+    Composite trapezoid quadrature
+    """
+    x3 = 0.5*(x1 + x2)
+    f1 = f(x1)
+    f2 = f(x2)
+    f3 = f(x3)
+
+    # Two-panel trapezoid estimate.
+    s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
+
+    round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+                                       + 2*float(norm_func(f3))
+                                       + float(norm_func(f2))) * 2e-16
+
+    # One-panel estimate; |s1 - s2|/3 is the standard Richardson-type
+    # error estimate for the trapezoid rule.
+    s1 = 0.5 * (x2 - x1) * (f1 + f2)
+    err = 1/3 * float(norm_func(s1 - s2))
+    return s2, err, round_err
+
+
+# Three evaluations per call; memoize up to 3*3 so shared endpoints are
+# reused during subdivision (see _subdivide_interval).
+_quadrature_trapezoid.cache_size = 3 * 3
+_quadrature_trapezoid.num_eval = 3
+
+
+def _quadrature_gk(a, b, f, norm_func, x, w, v):
+    """
+    Generic Gauss-Kronrod 
quadrature
+    """
+
+    # fv holds the integrand values at the Kronrod nodes; the odd-index
+    # entries are the embedded Gauss nodes (see s_g below).
+    fv = [0.0]*len(x)
+
+    c = 0.5 * (a + b)
+    h = 0.5 * (b - a)
+
+    # Gauss-Kronrod
+    s_k = 0.0
+    s_k_abs = 0.0
+    for i in range(len(x)):
+        ff = f(c + h*x[i])
+        fv[i] = ff
+
+        vv = v[i]
+
+        # \int f(x)
+        s_k += vv * ff
+        # \int |f(x)|
+        s_k_abs += vv * abs(ff)
+
+    # Gauss
+    s_g = 0.0
+    for i in range(len(w)):
+        s_g += w[i] * fv[2*i + 1]
+
+    # Quadrature of abs-deviation from average
+    s_k_dabs = 0.0
+    y0 = s_k / 2.0
+    for i in range(len(x)):
+        # \int |f(x) - y0|
+        s_k_dabs += v[i] * abs(fv[i] - y0)
+
+    # Use similar error estimation as quadpack
+    err = float(norm_func((s_k - s_g) * h))
+    dabs = float(norm_func(s_k_dabs * h))
+    if dabs != 0 and err != 0:
+        err = dabs * min(1.0, (200 * err / dabs)**1.5)
+
+    eps = sys.float_info.epsilon
+    round_err = float(norm_func(50 * eps * h * s_k_abs))
+
+    if round_err > sys.float_info.min:
+        err = max(err, round_err)
+
+    return h * s_k, err, round_err
+
+
+def _quadrature_gk21(a, b, f, norm_func):
+    """
+    Gauss-Kronrod 21 quadrature with error estimate
+    """
+    # Gauss-Kronrod points
+    x = (0.995657163025808080735527280689003,
+         0.973906528517171720077964012084452,
+         0.930157491355708226001207180059508,
+         0.865063366688984510732096688423493,
+         0.780817726586416897063717578345042,
+         0.679409568299024406234327365114874,
+         0.562757134668604683339000099272694,
+         0.433395394129247190799265943165784,
+         0.294392862701460198131126603103866,
+         0.148874338981631210884826001129720,
+         0,
+         -0.148874338981631210884826001129720,
+         -0.294392862701460198131126603103866,
+         -0.433395394129247190799265943165784,
+         -0.562757134668604683339000099272694,
+         -0.679409568299024406234327365114874,
+         -0.780817726586416897063717578345042,
+         -0.865063366688984510732096688423493,
+         -0.930157491355708226001207180059508,
+         -0.973906528517171720077964012084452,
+         -0.995657163025808080735527280689003)
+
+    # 10-point weights
+    w = (0.066671344308688137593568809893332,
+         0.149451349150580593145776339657697,
+         0.219086362515982043995534934228163,
0.269266719309996355091226921569469, + 0.295524224714752870173892994651338, + 0.295524224714752870173892994651338, + 0.269266719309996355091226921569469, + 0.219086362515982043995534934228163, + 0.149451349150580593145776339657697, + 0.066671344308688137593568809893332) + + # 21-point weights + v = (0.011694638867371874278064396062192, + 0.032558162307964727478818972459390, + 0.054755896574351996031381300244580, + 0.075039674810919952767043140916190, + 0.093125454583697605535065465083366, + 0.109387158802297641899210590325805, + 0.123491976262065851077958109831074, + 0.134709217311473325928054001771707, + 0.142775938577060080797094273138717, + 0.147739104901338491374841515972068, + 0.149445554002916905664936468389821, + 0.147739104901338491374841515972068, + 0.142775938577060080797094273138717, + 0.134709217311473325928054001771707, + 0.123491976262065851077958109831074, + 0.109387158802297641899210590325805, + 0.093125454583697605535065465083366, + 0.075039674810919952767043140916190, + 0.054755896574351996031381300244580, + 0.032558162307964727478818972459390, + 0.011694638867371874278064396062192) + + return _quadrature_gk(a, b, f, norm_func, x, w, v) + + +_quadrature_gk21.num_eval = 21 + + +def _quadrature_gk15(a, b, f, norm_func): + """ + Gauss-Kronrod 15 quadrature with error estimate + """ + # Gauss-Kronrod points + x = (0.991455371120812639206854697526329, + 0.949107912342758524526189684047851, + 0.864864423359769072789712788640926, + 0.741531185599394439863864773280788, + 0.586087235467691130294144838258730, + 0.405845151377397166906606412076961, + 0.207784955007898467600689403773245, + 0.000000000000000000000000000000000, + -0.207784955007898467600689403773245, + -0.405845151377397166906606412076961, + -0.586087235467691130294144838258730, + -0.741531185599394439863864773280788, + -0.864864423359769072789712788640926, + -0.949107912342758524526189684047851, + -0.991455371120812639206854697526329) + + # 7-point weights + w = 
(0.129484966168869693270611432679082, + 0.279705391489276667901467771423780, + 0.381830050505118944950369775488975, + 0.417959183673469387755102040816327, + 0.381830050505118944950369775488975, + 0.279705391489276667901467771423780, + 0.129484966168869693270611432679082) + + # 15-point weights + v = (0.022935322010529224963732008058970, + 0.063092092629978553290700663189204, + 0.104790010322250183839876322541518, + 0.140653259715525918745189590510238, + 0.169004726639267902826583426598550, + 0.190350578064785409913256402421014, + 0.204432940075298892414161999234649, + 0.209482141084727828012999174891714, + 0.204432940075298892414161999234649, + 0.190350578064785409913256402421014, + 0.169004726639267902826583426598550, + 0.140653259715525918745189590510238, + 0.104790010322250183839876322541518, + 0.063092092629978553290700663189204, + 0.022935322010529224963732008058970) + + return _quadrature_gk(a, b, f, norm_func, x, w, v) + + +_quadrature_gk15.num_eval = 15 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..362783368a19904778dfafd1e24933be044f1102 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e703814f6fc19b664dac20ee3c9a604f4f2dda85a5460cf65f25458c040c45e9 +size 112024 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..0d273f6d2c9943f4a35f9b0c761a944b7be84cfe --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py @@ -0,0 +1,1279 @@ +# Author: Travis Oliphant 2001 +# 
Author: Nathan Woods 2013 (nquad &c) +import sys +import warnings +from functools import partial + +from . import _quadpack +import numpy as np + +__all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"] + + +class IntegrationWarning(UserWarning): + """ + Warning on issues during integration. + """ + pass + + +def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, + limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, + limlst=50, complex_func=False): + """ + Compute a definite integral. + + Integrate func from `a` to `b` (possibly infinite interval) using a + technique from the Fortran library QUADPACK. + + Parameters + ---------- + func : {function, scipy.LowLevelCallable} + A Python function or method to integrate. If `func` takes many + arguments, it is integrated along the axis corresponding to the + first argument. + + If the user desires improved integration performance, then `f` may + be a `scipy.LowLevelCallable` with one of the signatures:: + + double func(double x) + double func(double x, void *user_data) + double func(int n, double *xx) + double func(int n, double *xx, void *user_data) + + The ``user_data`` is the data contained in the `scipy.LowLevelCallable`. + In the call forms with ``xx``, ``n`` is the length of the ``xx`` + array which contains ``xx[0] == x`` and the rest of the items are + numbers contained in the ``args`` argument of quad. + + In addition, certain ctypes call signatures are supported for + backward compatibility, but those should not be used in new code. + a : float + Lower limit of integration (use -numpy.inf for -infinity). + b : float + Upper limit of integration (use numpy.inf for +infinity). + args : tuple, optional + Extra arguments to pass to `func`. + full_output : int, optional + Non-zero to return a dictionary of integration information. + If non-zero, warning messages are also suppressed and the + message is appended to the output tuple. 
+ complex_func : bool, optional + Indicate if the function's (`func`) return type is real + (``complex_func=False``: default) or complex (``complex_func=True``). + In both cases, the function's argument is real. + If full_output is also non-zero, the `infodict`, `message`, and + `explain` for the real and complex components are returned in + a dictionary with keys "real output" and "imag output". + + Returns + ------- + y : float + The integral of func from `a` to `b`. + abserr : float + An estimate of the absolute error in the result. + infodict : dict + A dictionary containing additional information. + message + A convergence message. + explain + Appended only with 'cos' or 'sin' weighting and infinite + integration limits, it contains an explanation of the codes in + infodict['ierlst'] + + Other Parameters + ---------------- + epsabs : float or int, optional + Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain + an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` + where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the + numerical approximation. See `epsrel` below. + epsrel : float or int, optional + Relative error tolerance. Default is 1.49e-8. + If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 + and ``50 * (machine epsilon)``. See `epsabs` above. + limit : float or int, optional + An upper bound on the number of subintervals used in the adaptive + algorithm. + points : (sequence of floats,ints), optional + A sequence of break points in the bounded integration interval + where local difficulties of the integrand may occur (e.g., + singularities, discontinuities). The sequence does not have + to be sorted. Note that this option cannot be used in conjunction + with ``weight``. + weight : float or int, optional + String indicating weighting function. Full explanation for this + and the remaining arguments can be found below. + wvar : optional + Variables for use with weighting functions. 
+ wopts : optional + Optional input for reusing Chebyshev moments. + maxp1 : float or int, optional + An upper bound on the number of Chebyshev moments. + limlst : int, optional + Upper bound on the number of cycles (>=3) for use with a sinusoidal + weighting and an infinite end-point. + + See Also + -------- + dblquad : double integral + tplquad : triple integral + nquad : n-dimensional integrals (uses `quad` recursively) + fixed_quad : fixed-order Gaussian quadrature + simpson : integrator for sampled data + romb : integrator for sampled data + scipy.special : for coefficients and roots of orthogonal polynomials + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Extra information for quad() inputs and outputs** + + If full_output is non-zero, then the third output argument + (infodict) is a dictionary with entries as tabulated below. For + infinite limits, the range is transformed to (0,1) and the + optional outputs are given with respect to this transformed range. + Let M be the input argument limit and let K be infodict['last']. + The entries are: + + 'neval' + The number of function evaluations. + 'last' + The number, K, of subintervals produced in the subdivision process. + 'alist' + A rank-1 array of length M, the first K elements of which are the + left end points of the subintervals in the partition of the + integration range. + 'blist' + A rank-1 array of length M, the first K elements of which are the + right end points of the subintervals. + 'rlist' + A rank-1 array of length M, the first K elements of which are the + integral approximations on the subintervals. + 'elist' + A rank-1 array of length M, the first K elements of which are the + moduli of the absolute error estimates on the subintervals. 
+ 'iord' + A rank-1 integer array of length M, the first L elements of + which are pointers to the error estimates over the subintervals + with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the + sequence ``infodict['iord']`` and let E be the sequence + ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a + decreasing sequence. + + If the input argument points is provided (i.e., it is not None), + the following additional outputs are placed in the output + dictionary. Assume the points sequence is of length P. + + 'pts' + A rank-1 array of length P+2 containing the integration limits + and the break points of the intervals in ascending order. + This is an array giving the subintervals over which integration + will occur. + 'level' + A rank-1 integer array of length M (=limit), containing the + subdivision levels of the subintervals, i.e., if (aa,bb) is a + subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]`` + are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l + if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``. + 'ndin' + A rank-1 integer array of length P+2. After the first integration + over the intervals (pts[1], pts[2]), the error estimates over some + of the intervals may have been increased artificially in order to + put their subdivision forward. This array has ones in slots + corresponding to the subintervals for which this happens. + + **Weighting the integrand** + + The input variables, *weight* and *wvar*, are used to weight the + integrand by a select list of functions. Different integration + methods are used to compute the integral with these weighting + functions, and these do not support specifying break points. The + possible values of weight and the corresponding weighting functions are. 
+ + ========== =================================== ===================== + ``weight`` Weight function used ``wvar`` + ========== =================================== ===================== + 'cos' cos(w*x) wvar = w + 'sin' sin(w*x) wvar = w + 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta) + 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta) + 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta) + 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta) + 'cauchy' 1/(x-c) wvar = c + ========== =================================== ===================== + + wvar holds the parameter w, (alpha, beta), or c depending on the weight + selected. In these expressions, a and b are the integration limits. + + For the 'cos' and 'sin' weighting, additional inputs and outputs are + available. + + For finite integration limits, the integration is performed using a + Clenshaw-Curtis method which uses Chebyshev moments. For repeated + calculations, these moments are saved in the output dictionary: + + 'momcom' + The maximum level of Chebyshev moments that have been computed, + i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been + computed for intervals of length ``|b-a| * 2**(-l)``, + ``l=0,1,...,M_c``. + 'nnlog' + A rank-1 integer array of length M(=limit), containing the + subdivision levels of the subintervals, i.e., an element of this + array is equal to l if the corresponding subinterval is + ``|b-a|* 2**(-l)``. + 'chebmo' + A rank-2 array of shape (25, maxp1) containing the computed + Chebyshev moments. These can be passed on to an integration + over the same interval by passing this array as the second + element of the sequence wopts and passing infodict['momcom'] as + the first element. + + If one of the integration limits is infinite, then a Fourier integral is + computed (assuming w neq 0). 
If full_output is 1 and a numerical error + is encountered, besides the error message attached to the output tuple, + a dictionary is also appended to the output tuple which translates the + error codes in the array ``info['ierlst']`` to English messages. The + output information dictionary contains the following entries instead of + 'last', 'alist', 'blist', 'rlist', and 'elist': + + 'lst' + The number of subintervals needed for the integration (call it ``K_f``). + 'rslst' + A rank-1 array of length M_f=limlst, whose first ``K_f`` elements + contain the integral contribution over the interval + ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|`` + and ``k=1,2,...,K_f``. + 'erlst' + A rank-1 array of length ``M_f`` containing the error estimate + corresponding to the interval in the same position in + ``infodict['rslist']``. + 'ierlst' + A rank-1 integer array of length ``M_f`` containing an error flag + corresponding to the interval in the same position in + ``infodict['rslist']``. See the explanation dictionary (last entry + in the output tuple) for the meaning of the codes. + + + **Details of QUADPACK level routines** + + `quad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. The routine called depends on + `weight`, `points` and the integration limits `a` and `b`. + + ================ ============== ========== ===================== + QUADPACK routine `weight` `points` infinite bounds + ================ ============== ========== ===================== + qagse None No No + qagie None No Yes + qagpe None Yes No + qawoe 'sin', 'cos' No No + qawfe 'sin', 'cos' No either `a` or `b` + qawse 'alg*' No No + qawce 'cauchy' No No + ================ ============== ========== ===================== + + The following provides a short description from [1]_ for each + routine. 
+ + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + qagpe + serves the same purposes as QAGS, but also allows the + user to provide explicit information about the location + and type of trouble-spots i.e. the abscissae of internal + singularities, discontinuities and other difficulties of + the integrand function. + qawoe + is an integrator for the evaluation of + :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or + :math:`\\int^b_a \\sin(\\omega x)f(x)dx` + over a finite interval [a,b], where :math:`\\omega` and :math:`f` + are specified by the user. The rule evaluation component is based + on the modified Clenshaw-Curtis technique + + An adaptive subdivision scheme is used in connection + with an extrapolation procedure, which is a modification + of that in ``QAGS`` and allows the algorithm to deal with + singularities in :math:`f(x)`. + qawfe + calculates the Fourier transform + :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or + :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx` + for user-provided :math:`\\omega` and :math:`f`. The procedure of + ``QAWO`` is applied on successive finite intervals, and convergence + acceleration by means of the :math:`\\varepsilon`-algorithm is applied + to the series of integral approximations. + qawse + approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where + :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with + :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the + following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`, + :math:`\\log(x-a)\\log(b-x)`. + + The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the + function :math:`v`. 
A globally adaptive subdivision strategy is + applied, with modified Clenshaw-Curtis integration on those + subintervals which contain `a` or `b`. + qawce + compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be + interpreted as a Cauchy principal value integral, for user specified + :math:`c` and :math:`f`. The strategy is globally adaptive. Modified + Clenshaw-Curtis integration is used on those intervals containing the + point :math:`x = c`. + + **Integration of Complex Function of a Real Variable** + + A complex valued function, :math:`f`, of a real variable can be written as + :math:`f = g + ih`. Similarly, the integral of :math:`f` can be + written as + + .. math:: + \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx + + assuming that the integrals of :math:`g` and :math:`h` exist + over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates + complex-valued functions by integrating the real and imaginary components + separately. + + + References + ---------- + + .. [1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + .. [2] McCullough, Thomas; Phillips, Keith (1973). + Foundations of Analysis in the Complex Plane. + Holt Rinehart Winston. + ISBN 0-03-086370-8 + + Examples + -------- + Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result + + >>> from scipy import integrate + >>> import numpy as np + >>> x2 = lambda x: x**2 + >>> integrate.quad(x2, 0, 4) + (21.333333333333332, 2.3684757858670003e-13) + >>> print(4**3 / 3.) 
# analytical result + 21.3333333333 + + Calculate :math:`\\int^\\infty_0 e^{-x} dx` + + >>> invexp = lambda x: np.exp(-x) + >>> integrate.quad(invexp, 0, np.inf) + (1.0, 5.842605999138044e-11) + + Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3` + + >>> f = lambda x, a: a*x + >>> y, err = integrate.quad(f, 0, 1, args=(1,)) + >>> y + 0.5 + >>> y, err = integrate.quad(f, 0, 1, args=(3,)) + >>> y + 1.5 + + Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding + y parameter as 1:: + + testlib.c => + double func(int n, double args[n]){ + return args[0]*args[0] + args[1]*args[1];} + compile to library testlib.* + + :: + + from scipy import integrate + import ctypes + lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path + lib.func.restype = ctypes.c_double + lib.func.argtypes = (ctypes.c_int,ctypes.c_double) + integrate.quad(lib.func,0,1,(1)) + #(1.3333333333333333, 1.4802973661668752e-14) + print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result + # 1.3333333333333333 + + Be aware that pulse shapes and other sharp features as compared to the + size of the integration interval may not be integrated correctly using + this method. A simplified example of this limitation is integrating a + y-axis reflected step function with many zero values within the integrals + bounds. 
+ + >>> y = lambda x: 1 if x<=0 else 0 + >>> integrate.quad(y, -1, 1) + (1.0, 1.1102230246251565e-14) + >>> integrate.quad(y, -1, 100) + (1.0000000002199108, 1.0189464580163188e-08) + >>> integrate.quad(y, -1, 10000) + (0.0, 0.0) + + """ + if not isinstance(args, tuple): + args = (args,) + + # check the limits of integration: \int_a^b, expect a < b + flip, a, b = b < a, min(a, b), max(a, b) + + if complex_func: + def imfunc(x, *args): + return func(x, *args).imag + + def refunc(x, *args): + return func(x, *args).real + + re_retval = quad(refunc, a, b, args, full_output, epsabs, + epsrel, limit, points, weight, wvar, wopts, + maxp1, limlst, complex_func=False) + im_retval = quad(imfunc, a, b, args, full_output, epsabs, + epsrel, limit, points, weight, wvar, wopts, + maxp1, limlst, complex_func=False) + integral = re_retval[0] + 1j*im_retval[0] + error_estimate = re_retval[1] + 1j*im_retval[1] + retval = integral, error_estimate + if full_output: + msgexp = {} + msgexp["real"] = re_retval[2:] + msgexp["imag"] = im_retval[2:] + retval = retval + (msgexp,) + + return retval + + if weight is None: + retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit, + points) + else: + if points is not None: + msg = ("Break points cannot be specified when using weighted integrand.\n" + "Continuing, ignoring specified points.") + warnings.warn(msg, IntegrationWarning, stacklevel=2) + retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel, + limlst, limit, maxp1, weight, wvar, wopts) + + if flip: + retval = (-retval[0],) + retval[1:] + + ier = retval[-1] + if ier == 0: + return retval[:-1] + + msgs = {80: "A Python error occurred possibly while calling the function.", + 1: f"The maximum number of subdivisions ({limit}) has been achieved.\n " + f"If increasing the limit yields no improvement it is advised to " + f"analyze \n the integrand in order to determine the difficulties. 
" + f"If the position of a \n local difficulty can be determined " + f"(singularity, discontinuity) one will \n probably gain from " + f"splitting up the interval and calling the integrator \n on the " + f"subranges. Perhaps a special-purpose integrator should be used.", + 2: "The occurrence of roundoff error is detected, which prevents \n " + "the requested tolerance from being achieved. " + "The error may be \n underestimated.", + 3: "Extremely bad integrand behavior occurs at some points of the\n " + "integration interval.", + 4: "The algorithm does not converge. Roundoff error is detected\n " + "in the extrapolation table. It is assumed that the requested " + "tolerance\n cannot be achieved, and that the returned result " + "(if full_output = 1) is \n the best which can be obtained.", + 5: "The integral is probably divergent, or slowly convergent.", + 6: "The input is invalid.", + 7: "Abnormal termination of the routine. The estimates for result\n " + "and error are less reliable. It is assumed that the requested " + "accuracy\n has not been achieved.", + 'unknown': "Unknown error."} + + if weight in ['cos','sin'] and (b == np.inf or a == -np.inf): + msgs[1] = ( + "The maximum number of cycles allowed has been achieved., e.e.\n of " + "subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n " + "*pi/abs(omega), for k = 1, 2, ..., lst. " + "One can allow more cycles by increasing the value of limlst. " + "Look at info['ierlst'] with full_output=1." + ) + msgs[4] = ( + "The extrapolation table constructed for convergence acceleration\n of " + "the series formed by the integral contributions over the cycles, \n does " + "not converge to within the requested accuracy. " + "Look at \n info['ierlst'] with full_output=1." + ) + msgs[7] = ( + "Bad integrand behavior occurs within one or more of the cycles.\n " + "Location and type of the difficulty involved can be determined from \n " + "the vector info['ierlist'] obtained with full_output=1." 
+ ) + explain = {1: "The maximum number of subdivisions (= limit) has been \n " + "achieved on this cycle.", + 2: "The occurrence of roundoff error is detected and prevents\n " + "the tolerance imposed on this cycle from being achieved.", + 3: "Extremely bad integrand behavior occurs at some points of\n " + "this cycle.", + 4: "The integral over this cycle does not converge (to within the " + "required accuracy) due to roundoff in the extrapolation " + "procedure invoked on this cycle. It is assumed that the result " + "on this interval is the best which can be obtained.", + 5: "The integral over this cycle is probably divergent or " + "slowly convergent."} + + try: + msg = msgs[ier] + except KeyError: + msg = msgs['unknown'] + + if ier in [1,2,3,4,5,7]: + if full_output: + if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf): + return retval[:-1] + (msg, explain) + else: + return retval[:-1] + (msg,) + else: + warnings.warn(msg, IntegrationWarning, stacklevel=2) + return retval[:-1] + + elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6 + if epsabs <= 0: # Small error tolerance - applies to all methods + if epsrel < max(50 * sys.float_info.epsilon, 5e-29): + msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both" + " 5e-29 and 50*(machine epsilon).") + elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf): + msg = ("Sine or cosine weighted integrals with infinite domain" + " must have 'epsabs'>0.") + + elif weight is None: + if points is None: # QAGSE/QAGIE + msg = ("Invalid 'limit' argument. There must be" + " at least one subinterval") + else: # QAGPE + if not (min(a, b) <= min(points) <= max(points) <= max(a, b)): + msg = ("All break points in 'points' must lie within the" + " integration limits.") + elif len(points) >= limit: + msg = (f"Number of break points ({len(points):d}) " + f"must be less than subinterval limit ({limit:d})") + + else: + if maxp1 < 1: + msg = "Chebyshev moment limit maxp1 must be >=1." 
+ + elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE + msg = "Cycle limit limlst must be >=3." + + elif weight.startswith('alg'): # QAWSE + if min(wvar) < -1: + msg = "wvar parameters (alpha, beta) must both be >= -1." + if b < a: + msg = "Integration limits a, b must satistfy a>> import numpy as np + >>> from scipy import integrate + >>> f = lambda y, x: x*y**2 + >>> integrate.dblquad(f, 0, 2, 0, 1) + (0.6666666666666667, 7.401486830834377e-15) + + Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 + \\,dy \\,dx`. + + >>> f = lambda y, x: 1 + >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos) + (0.41421356237309503, 1.1083280054755938e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx` + for :math:`a=1, 3`. + + >>> f = lambda y, x, a: a*x*y + >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,)) + (0.33333333333333337, 5.551115123125783e-15) + >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,)) + (0.9999999999999999, 1.6653345369377348e-14) + + Compute the two-dimensional Gaussian Integral, which is the integral of the + Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over + :math:`(-\\infty,+\\infty)`. That is, compute the integral + :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`. + + >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2)) + >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf) + (3.141592653589777, 2.5173086737433208e-08) + + """ + + def temp_ranges(*args): + return [gfun(args[0]) if callable(gfun) else gfun, + hfun(args[0]) if callable(hfun) else hfun] + + return nquad(func, [temp_ranges, [a, b]], args=args, + opts={"epsabs": epsabs, "epsrel": epsrel}) + + +def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8, + epsrel=1.49e-8): + """ + Compute a triple (definite) integral. 
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : function or float
        The lower boundary curve in y: a function of the single variable
        ``x``, or a float for a constant boundary.
    hfun : function or float
        The upper boundary curve in y (same requirements as `gfun`).
    qfun : function or float
        The lower boundary surface in z: a function of two floats in the
        order ``(x, y)``, or a float for a constant boundary.
    rfun : function or float
        The upper boundary surface in z (same requirements as `qfun`).
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : Adaptive quadrature using QUADPACK
    fixed_quad : Fixed-order Gaussian quadrature
    dblquad : Double integrals
    nquad : N-dimensional integrals
    romb : Integrators for sampled data
    simpson : Integrators for sampled data
    scipy.special : For coefficients and roots of orthogonal polynomials

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    `tplquad` delegates to `nquad`, which in turn calls QUADPACK's ``qagse``
    for finite limits and ``qagie`` when either limit is infinite, at every
    level of the nested integration [1]_.

    References
    ----------
    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
           Überhuber, Christoph W.; Kahaner, David (1983).
           QUADPACK: A subroutine package for automatic integration.
           Springer-Verlag.
           ISBN 978-3-540-12553-2.

    Examples
    --------
    Compute the triple integral of ``x * y * z`` over the box
    ``1 <= x <= 2``, ``2 <= y <= 3``, ``0 <= z <= 1``.

    >>> import numpy as np
    >>> from scipy import integrate
    >>> f = lambda z, y, x: x*y*z
    >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
    (1.8749999999999998, 3.3246447942574074e-14)

    Note that `qfun`/`rfun` take arguments in the order (x, y), even though
    ``f`` takes arguments in the order (z, y, x):

    >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
    (0.05416666666666668, 2.1774196738157757e-14)
    """
    # nquad integrates innermost-first and hands each range callable the
    # *outer* variables in order: the z-range receives (y, x, t0, ...),
    # the y-range receives (x, t0, ...).  The public qfun/rfun API instead
    # takes its two arguments in the order (x, y), hence the swap below.
    def z_bounds(*outer):
        y, x = outer[0], outer[1]
        lo = qfun(x, y) if callable(qfun) else qfun
        hi = rfun(x, y) if callable(rfun) else rfun
        return [lo, hi]

    def y_bounds(*outer):
        x = outer[0]
        lo = gfun(x) if callable(gfun) else gfun
        hi = hfun(x) if callable(hfun) else hfun
        return [lo, hi]

    return nquad(func, [z_bounds, y_bounds, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
    r"""
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables. Various
    options allow improved integration of discontinuous functions, as well
    as the use of weighted integration, and generally finer control of the
    integration process.

    Parameters
    ----------
    func : {callable, scipy.LowLevelCallable}
        The function to be integrated. Has arguments of ``x0, ... xn``,
        ``t0, ... tm``, where integration is carried out over ``x0, ... xn``,
        which must be floats, and ``t0, ... tm`` are extra arguments passed
        in `args`. Integration over ``x0`` is the innermost integral and
        ``xn`` the outermost. A `scipy.LowLevelCallable` with signature
        ``double func(int n, double *xx)`` or
        ``double func(int n, double *xx, void *user_data)`` may be used for
        improved performance, where ``xx`` holds coordinates and extra
        arguments and ``user_data`` is the data in the
        `scipy.LowLevelCallable`.
    ranges : iterable object
        Each element is either a sequence of 2 numbers or a callable
        returning such a sequence. ``ranges[0]`` corresponds to integration
        over ``x0``, and so on. A callable element is called with all outer
        integration variables plus any parametric arguments, e.g.
        ``(a, b) = range0(x1, x2, t0, t1)``.
    args : iterable object, optional
        Additional arguments ``t0, ... tn`` required by `func`, `ranges`,
        and `opts`.
    opts : iterable object or dict, optional
        Options passed to `quad`: may be empty, a dict applied at every
        level, or a sequence of dicts/callables (same signature rule as
        `ranges`), one per integration variable. Recognized keys and their
        defaults: ``epsabs=1.49e-08``, ``epsrel=1.49e-08``, ``limit=50``,
        ``points=None``, ``weight=None``, ``wvar=None``, ``wopts=None``.
        See `quad` for details.
    full_output : bool, optional
        Partial implementation of ``full_output`` from `quad`: when True,
        the number of integrand evaluations ``neval`` is returned in an
        additional dict.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.
    out_dict : dict, optional
        A dict containing additional information on the integration.

    See Also
    --------
    quad : 1-D numerical integration
    dblquad, tplquad : double and triple integrals
    fixed_quad : fixed-order Gaussian quadrature

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed. The QUADPACK routine used at each level
    depends on ``weight``, ``points`` and the integration limits exactly as
    described in `quad` [1]_.

    References
    ----------
    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
           Überhuber, Christoph W.; Kahaner, David (1983).
           QUADPACK: A subroutine package for automatic integration.
           Springer-Verlag.
           ISBN 978-3-540-12553-2.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...     1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}], full_output=True)
    (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
    """
    ndim = len(ranges)
    # Normalize every range / option entry to a callable so _NQuad can treat
    # constants and user callables uniformly.
    ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
    args = () if args is None else args
    if opts is None:
        opts = [{}] * ndim

    if isinstance(opts, dict):
        opts = [_OptFunc(opts)] * ndim
    else:
        opts = [o if callable(o) else _OptFunc(o) for o in opts]
    return _NQuad(func, ranges, opts, full_output).integrate(*args)


class _RangeFunc:
    """Wrap a constant ``(low, high)`` pair in the range-callable interface."""

    def __init__(self, range_):
        self.range_ = range_

    def __call__(self, *args):
        # *args is accepted (and ignored) so constant ranges can be invoked
        # with the same variable-length signature as user range callables.
        return self.range_


class _OptFunc:
    """Wrap a constant options dict in the options-callable interface."""

    def __init__(self, opt):
        self.opt = opt

    def __call__(self, *args):
        # *args is accepted (and ignored); see _RangeFunc.__call__.
        return self.opt


class _NQuad:
    """Recursive driver for `nquad`: one `quad` call per nesting depth."""

    def __init__(self, func, ranges, opts, full_output):
        self.abserr = 0                  # running max of per-level abserr
        self.func = func
        self.ranges = ranges
        self.opts = opts
        self.maxdepth = len(ranges)
        self.full_output = full_output
        if self.full_output:
            self.out_dict = {'neval': 0}

    def integrate(self, *args, **kwargs):
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')

        # ranges/opts are ordered innermost-first, but integration proceeds
        # outermost-first, hence the negative index.
        idx = -(depth + 1)
        low, high = self.ranges[idx](*args)
        opt = dict(self.opts[idx](*args))

        if 'points' in opt:
            # quad rejects break points outside [low, high]; drop them.
            opt['points'] = [p for p in opt['points'] if low <= p <= high]

        innermost = depth + 1 == self.maxdepth
        if innermost:
            integrand = self.func
        else:
            integrand = partial(self.integrate, depth=depth + 1)
        quad_r = quad(integrand, low, high, args=args,
                      full_output=self.full_output, **opt)
        value, abserr = quad_r[0], quad_r[1]
        if self.full_output and innermost:
            # 'neval' counts integrand evaluations, so only the innermost
            # loop contributes to the total.
            self.out_dict['neval'] += quad_r[2]['neval']
        self.abserr = max(self.abserr, abserr)
        if depth > 0:
            return value
        # Final result of N-D integration with error
        if self.full_output:
            return value, self.abserr, self.out_dict
        return value, self.abserr
+ ind = -(depth + 1) + fn_range = self.ranges[ind] + low, high = fn_range(*args) + fn_opt = self.opts[ind] + opt = dict(fn_opt(*args)) + + if 'points' in opt: + opt['points'] = [x for x in opt['points'] if low <= x <= high] + if depth + 1 == self.maxdepth: + f = self.func + else: + f = partial(self.integrate, depth=depth+1) + quad_r = quad(f, low, high, args=args, full_output=self.full_output, + **opt) + value = quad_r[0] + abserr = quad_r[1] + if self.full_output: + infodict = quad_r[2] + # The 'neval' parameter in full_output returns the total + # number of times the integrand function was evaluated. + # Therefore, only the innermost integration loop counts. + if depth + 1 == self.maxdepth: + self.out_dict['neval'] += infodict['neval'] + self.abserr = max(self.abserr, abserr) + if depth > 0: + return value + else: + # Final result of N-D integration with error + if self.full_output: + return value, self.abserr, self.out_dict + else: + return value, self.abserr diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadrature.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..44cf10b32335014cd7cb26459c8dc89ac8f851ff --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_quadrature.py @@ -0,0 +1,1336 @@ +import numpy as np +import numpy.typing as npt +import math +import warnings +from collections import namedtuple +from collections.abc import Callable + +from scipy.special import roots_legendre +from scipy.special import gammaln, logsumexp +from scipy._lib._util import _rng_spawn +from scipy._lib._array_api import _asarray, array_namespace, xp_broadcast_promote + + +__all__ = ['fixed_quad', 'romb', + 'trapezoid', 'simpson', + 'cumulative_trapezoid', 'newton_cotes', + 'qmc_quad', 'cumulative_simpson'] + + +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. 
+ + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. The default is the last axis. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + cumulative_trapezoid, simpson, romb + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. 
[2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + Use the trapezoidal rule on evenly spaced points: + + >>> import numpy as np + >>> from scipy import integrate + >>> integrate.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> integrate.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> integrate.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> integrate.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> integrate.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> integrate.trapezoid(a, axis=1) + array([2., 8.]) + """ + xp = array_namespace(y) + y = _asarray(y, xp=xp, subok=True) + # Cannot just use the broadcasted arrays that are returned + # because trapezoid does not follow normal broadcasting rules + # cf. 
https://github.com/scipy/scipy/pull/21524#issuecomment-2354105942 + result_dtype = xp_broadcast_promote(y, force_floating=True, xp=xp)[0].dtype + nd = y.ndim + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + if x is None: + d = dx + else: + x = _asarray(x, xp=xp, subok=True) + if x.ndim == 1: + d = x[1:] - x[:-1] + # make d broadcastable to y + slice3 = [None] * nd + slice3[axis] = slice(None) + d = d[tuple(slice3)] + else: + # if x is n-D it should be broadcastable to y + x = xp.broadcast_to(x, y.shape) + d = x[tuple(slice1)] - x[tuple(slice2)] + try: + ret = xp.sum( + d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, + axis=axis, dtype=result_dtype + ) + except ValueError: + # Operations didn't work, cast to ndarray + d = xp.asarray(d) + y = xp.asarray(y) + ret = xp.sum( + d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, + axis=axis, dtype=result_dtype + ) + return ret + + +def _cached_roots_legendre(n): + """ + Cache roots_legendre results to speed up calls of the fixed_quad + function. + """ + if n in _cached_roots_legendre.cache: + return _cached_roots_legendre.cache[n] + + _cached_roots_legendre.cache[n] = roots_legendre(n) + return _cached_roots_legendre.cache[n] + + +_cached_roots_legendre.cache = dict() + + +def fixed_quad(func, a, b, args=(), n=5): + """ + Compute a definite integral using fixed-order Gaussian quadrature. + + Integrate `func` from `a` to `b` using Gaussian quadrature of + order `n`. + + Parameters + ---------- + func : callable + A Python function or method to integrate (must accept vector inputs). + If integrating a vector-valued function, the returned array must have + shape ``(..., len(x))``. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + args : tuple, optional + Extra arguments to pass to function, if any. + n : int, optional + Order of quadrature integration. Default is 5. 
+ + Returns + ------- + val : float + Gaussian quadrature approximation to the integral + none : None + Statically returned value of None + + See Also + -------- + quad : adaptive quadrature using QUADPACK + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + simpson : integrators for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> f = lambda x: x**8 + >>> integrate.fixed_quad(f, 0.0, 1.0, n=4) + (0.1110884353741496, None) + >>> integrate.fixed_quad(f, 0.0, 1.0, n=5) + (0.11111111111111102, None) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4) + (0.9999999771971152, None) + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5) + (1.000000000039565, None) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + x, w = _cached_roots_legendre(n) + x = np.real(x) + if np.isinf(a) or np.isinf(b): + raise ValueError("Gaussian quadrature is only available for " + "finite limits.") + y = (b-a)*(x+1)/2.0 + a + return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None + + +def tupleset(t, i, value): + l = list(t) + l[i] = value + return tuple(l) + + +def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None): + """ + Cumulatively integrate y(x) using the composite trapezoidal rule. + + Parameters + ---------- + y : array_like + Values to integrate. + x : array_like, optional + The coordinate to integrate along. If None (default), use spacing `dx` + between consecutive elements in `y`. + dx : float, optional + Spacing between elements of `y`. Only used if `x` is None. + axis : int, optional + Specifies the axis to cumulate. Default is -1 (last axis). + initial : scalar, optional + If given, insert this value at the beginning of the returned result. + 0 or None are the only values accepted. 
Default is None, which means + `res` has one element less than `y` along the axis of integration. + + Returns + ------- + res : ndarray + The result of cumulative integration of `y` along `axis`. + If `initial` is None, the shape is such that the axis of integration + has one less value than `y`. If `initial` is given, the shape is equal + to that of `y`. + + See Also + -------- + numpy.cumsum, numpy.cumprod + cumulative_simpson : cumulative integration using Simpson's 1/3 rule + quad : adaptive quadrature using QUADPACK + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-2, 2, num=20) + >>> y = x + >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0) + >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-') + >>> plt.show() + + """ + y = np.asarray(y) + if y.shape[axis] == 0: + raise ValueError("At least one point is required along `axis`.") + if x is None: + d = dx + else: + x = np.asarray(x) + if x.ndim == 1: + d = np.diff(x) + # reshape to correct shape + shape = [1] * y.ndim + shape[axis] = -1 + d = d.reshape(shape) + elif len(x.shape) != len(y.shape): + raise ValueError("If given, shape of x must be 1-D or the " + "same as y.") + else: + d = np.diff(x, axis=axis) + + if d.shape[axis] != y.shape[axis] - 1: + raise ValueError("If given, length of x along axis must be the " + "same as y.") + + nd = len(y.shape) + slice1 = tupleset((slice(None),)*nd, axis, slice(1, None)) + slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1)) + res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis) + + if initial is not None: + if initial != 0: + raise ValueError("`initial` must be `None` or `0`.") + if not np.isscalar(initial): + raise ValueError("`initial` parameter should be a scalar.") + + shape = list(res.shape) + 
shape[axis] = 1 + res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res], + axis=axis) + + return res + + +def _basic_simpson(y, start, stop, x, dx, axis): + nd = len(y.shape) + if start is None: + start = 0 + step = 2 + slice_all = (slice(None),)*nd + slice0 = tupleset(slice_all, axis, slice(start, stop, step)) + slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) + slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step)) + + if x is None: # Even-spaced Simpson's rule. + result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis) + result *= dx / 3.0 + else: + # Account for possibly different spacings. + # Simpson's rule changes a bit. + h = np.diff(x, axis=axis) + sl0 = tupleset(slice_all, axis, slice(start, stop, step)) + sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) + h0 = h[sl0].astype(float, copy=False) + h1 = h[sl1].astype(float, copy=False) + hsum = h0 + h1 + hprod = h0 * h1 + h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0) + tmp = hsum/6.0 * (y[slice0] * + (2.0 - np.true_divide(1.0, h0divh1, + out=np.zeros_like(h0divh1), + where=h0divh1 != 0)) + + y[slice1] * (hsum * + np.true_divide(hsum, hprod, + out=np.zeros_like(hsum), + where=hprod != 0)) + + y[slice2] * (2.0 - h0divh1)) + result = np.sum(tmp, axis=axis) + return result + + +def simpson(y, x=None, *, dx=1.0, axis=-1): + """ + Integrate y(x) using samples along the given axis and the composite + Simpson's rule. If x is None, spacing of dx is assumed. + + Parameters + ---------- + y : array_like + Array to be integrated. + x : array_like, optional + If given, the points at which `y` is sampled. + dx : float, optional + Spacing of integration points along axis of `x`. Only used when + `x` is None. Default is 1. + axis : int, optional + Axis along which to integrate. Default is the last axis. + + Returns + ------- + float + The estimated integral computed with the composite Simpson's rule. 
def simpson(y, x=None, *, dx=1.0, axis=-1):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : float, optional
        Spacing of integration points along axis of `x`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.

    Returns
    -------
    float
        The estimated integral computed with the composite Simpson's rule.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    cumulative_simpson : cumulative integration using Simpson's 1/3 rule

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.  For an even
    number of samples, the final interval is handled with the
    irregular-spacing correction of Cartwright [1]_.

    References
    ----------
    .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(0, 10)
    >>> integrate.simpson(x, x=x)
    40.5
    """
    y = np.asarray(y)
    ndim = len(y.shape)
    npts = y.shape[axis]
    tail_dx = dx
    restore_x_shape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Reshape 1-D `x` so it broadcasts against `y` along `axis`.
            bcast = [1] * ndim
            bcast[axis] = x.shape[0]
            saved_x_shape = x.shape
            restore_x_shape = 1
            x = x.reshape(tuple(bcast))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        if x.shape[axis] != npts:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    def at(spec):
        # Index tuple selecting `spec` along the integration axis.
        idx = [slice(None)] * ndim
        idx[axis] = spec
        return tuple(idx)

    if npts % 2 == 0:
        tail_val = 0.0
        total = 0.0

        if npts == 2:
            # Need at least 3 points along the axis for a parabolic segment;
            # with two points the trapezoidal rule is the only choice.
            last, prev = at(-1), at(-2)
            if x is not None:
                tail_dx = x[last] - x[prev]
            tail_val += 0.5 * tail_dx * (y[last] + y[prev])
        else:
            # Simpson's rule on everything up to the final interval...
            total = _basic_simpson(y, 0, npts-3, x, dx, axis)

            last, prev, prev2 = at(-1), at(-2), at(-3)

            h = np.asarray([dx, dx], dtype=np.float64)
            if x is not None:
                # Grab the last two spacings from the appropriate axis.
                spacings = np.float64(np.diff(x, axis=axis))
                h = [np.squeeze(spacings[at(slice(-2, -1, 1))], axis=axis),
                     np.squeeze(spacings[at(slice(-1, None, 1))], axis=axis)]

            # ...then add the correction for the final (odd) interval,
            # following the irregular-spacing composite-Simpson equations
            # (Cartwright 2017, Eq. 8, rearranged for the *last* interval as
            # on the Wikipedia "Simpson's rule" page).  Divisions are guarded
            # so degenerate (zero-width) intervals contribute 0 rather than
            # nan.
            num = 2 * h[1] ** 2 + 3 * h[0] * h[1]
            den = 6 * (h[1] + h[0])
            alpha = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = h[1] ** 2 + 3.0 * h[0] * h[1]
            den = 6 * h[0]
            beta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = 1 * h[1] ** 3
            den = 6 * h[0] * (h[0] + h[1])
            eta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            total += alpha*y[last] + beta*y[prev] - eta*y[prev2]

        total += tail_val
    else:
        total = _basic_simpson(y, 0, npts-2, x, dx, axis)

    if restore_x_shape:
        x = x.reshape(saved_x_shape)
    return total
+ num = 2 * h[1] ** 2 + 3 * h[0] * h[1] + den = 6 * (h[1] + h[0]) + alpha = np.true_divide( + num, + den, + out=np.zeros_like(den), + where=den != 0 + ) + + num = h[1] ** 2 + 3.0 * h[0] * h[1] + den = 6 * h[0] + beta = np.true_divide( + num, + den, + out=np.zeros_like(den), + where=den != 0 + ) + + num = 1 * h[1] ** 3 + den = 6 * h[0] * (h[0] + h[1]) + eta = np.true_divide( + num, + den, + out=np.zeros_like(den), + where=den != 0 + ) + + result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3] + + result += val + else: + result = _basic_simpson(y, 0, N-2, x, dx, axis) + if returnshape: + x = x.reshape(saveshape) + return result + + +def _cumulatively_sum_simpson_integrals( + y: np.ndarray, + dx: np.ndarray, + integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray], +) -> np.ndarray: + """Calculate cumulative sum of Simpson integrals. + Takes as input the integration function to be used. + The integration_func is assumed to return the cumulative sum using + composite Simpson's rule. Assumes the axis of summation is -1. + """ + sub_integrals_h1 = integration_func(y, dx) + sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1] + + shape = list(sub_integrals_h1.shape) + shape[-1] += 1 + sub_integrals = np.empty(shape) + sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2] + sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2] + # Integral over last subinterval can only be calculated from + # formula for h2 + sub_integrals[..., -1] = sub_integrals_h2[..., -1] + res = np.cumsum(sub_integrals, axis=-1) + return res + + +def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray: + """Calculate the Simpson integrals for all h1 intervals assuming equal interval + widths. The function can also be used to calculate the integral for all + h2 intervals by reversing the inputs, `y` and `dx`. 
+ """ + d = dx[..., :-1] + f1 = y[..., :-2] + f2 = y[..., 1:-1] + f3 = y[..., 2:] + + # Calculate integral over the subintervals (eqn (10) of Reference [2]) + return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4) + + +def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray: + """Calculate the Simpson integrals for all h1 intervals assuming unequal interval + widths. The function can also be used to calculate the integral for all + h2 intervals by reversing the inputs, `y` and `dx`. + """ + x21 = dx[..., :-1] + x32 = dx[..., 1:] + f1 = y[..., :-2] + f2 = y[..., 1:-1] + f3 = y[..., 2:] + + x31 = x21 + x32 + x21_x31 = x21/x31 + x21_x32 = x21/x32 + x21x21_x31x32 = x21_x31 * x21_x32 + + # Calculate integral over the subintervals (eqn (8) of Reference [2]) + coeff1 = 3 - x21_x31 + coeff2 = 3 + x21x21_x31x32 + x21_x31 + coeff3 = -x21x21_x31x32 + + return x21/6 * (coeff1*f1 + coeff2*f2 + coeff3*f3) + + +def _ensure_float_array(arr: npt.ArrayLike) -> np.ndarray: + arr = np.asarray(arr) + if np.issubdtype(arr.dtype, np.integer): + arr = arr.astype(float, copy=False) + return arr + + +def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None): + r""" + Cumulatively integrate y(x) using the composite Simpson's 1/3 rule. + The integral of the samples at every point is calculated by assuming a + quadratic relationship between each point and the two adjacent points. + + Parameters + ---------- + y : array_like + Values to integrate. Requires at least one point along `axis`. If two or fewer + points are provided along `axis`, Simpson's integration is not possible and the + result is calculated with `cumulative_trapezoid`. + x : array_like, optional + The coordinate to integrate along. Must have the same shape as `y` or + must be 1D with the same length as `y` along `axis`. `x` must also be + strictly increasing along `axis`. + If `x` is None (default), integration is performed using spacing `dx` + between consecutive elements in `y`. 
def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None):
    r"""
    Cumulatively integrate y(x) using the composite Simpson's 1/3 rule.
    The integral of the samples at every point is calculated by assuming a
    quadratic relationship between each point and the two adjacent points.

    Parameters
    ----------
    y : array_like
        Values to integrate. Requires at least one point along `axis`. If two
        or fewer points are provided along `axis`, Simpson's integration is
        not possible and the result is calculated with `cumulative_trapezoid`.
    x : array_like, optional
        The coordinate to integrate along. Must have the same shape as `y` or
        must be 1D with the same length as `y` along `axis`. `x` must also be
        strictly increasing along `axis`.
        If `x` is None (default), integration is performed using spacing `dx`
        between consecutive elements in `y`.
    dx : scalar or array_like, optional
        Spacing between elements of `y`. Only used if `x` is None. Can either
        be a float, or an array with the same shape as `y`, but of length one
        along `axis`. Default is 1.0.
    axis : int, optional
        Specifies the axis to integrate along. Default is -1 (last axis).
    initial : scalar or array_like, optional
        If given, insert this value at the beginning of the returned result,
        and add it to the rest of the result. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration. Can either be a float, or an array with
        the same shape as `y`, but of length one along `axis`.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum
    cumulative_trapezoid : cumulative integration using the composite
        trapezoidal rule
    simpson : integrator for sampled data using the Composite Simpson's Rule

    Notes
    -----
    .. versionadded:: 1.12.0

    A quadratic is fit through every three consecutive samples; the integral
    over each subinterval follows formula (8) of Cartwright [2]_ (see also
    [1]_).  For equally spaced samples the result is exact for polynomials of
    order three or less when the number of subintervals is even; otherwise it
    is exact for polynomials of order two or less.

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Simpson's_rule
    .. [2] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x**2
    >>> y_int = integrate.cumulative_simpson(y, x=x, initial=0)
    """
    y = _ensure_float_array(y)

    # Validate `axis` and standardize to work along the last axis.
    y_in = y
    in_shape = y.shape
    try:
        y = np.swapaxes(y, axis, -1)
    except IndexError as e:
        message = f"`axis={axis}` is not valid for `y` with `y.ndim={y.ndim}`."
        raise ValueError(message) from e

    if y.shape[-1] < 3:
        # Too few samples for a parabolic fit: fall back to the
        # trapezoidal rule.
        out = cumulative_trapezoid(y_in, x, dx=dx, axis=axis, initial=None)
        out = np.swapaxes(out, axis, -1)

    elif x is not None:
        x = _ensure_float_array(x)
        msg = ("If given, shape of `x` must be the same as `y` or 1-D with "
               "the same length as `y` along `axis`.")
        matches_full = x.shape == in_shape
        matches_1d = x.ndim == 1 and len(x) == in_shape[axis]
        if not (matches_full or matches_1d):
            raise ValueError(msg)

        x = np.broadcast_to(x, y.shape) if x.ndim == 1 else np.swapaxes(x, axis, -1)
        dx = np.diff(x, axis=-1)
        if np.any(dx <= 0):
            raise ValueError("Input x must be strictly increasing.")
        out = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_unequal_intervals
        )

    else:
        dx = _ensure_float_array(dx)
        full_dx_shape = tupleset(in_shape, axis, in_shape[axis] - 1)
        one_dx_shape = tupleset(in_shape, axis, 1)
        msg = ("If provided, `dx` must either be a scalar or have the same "
               "shape as `y` but with only 1 point along `axis`.")
        if not (dx.ndim == 0 or dx.shape == one_dx_shape):
            raise ValueError(msg)
        dx = np.broadcast_to(dx, full_dx_shape)
        dx = np.swapaxes(dx, axis, -1)
        out = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_equal_intervals
        )

    if initial is not None:
        initial = _ensure_float_array(initial)
        one_shape = tupleset(in_shape, axis, 1)
        msg = ("If provided, `initial` must either be a scalar or have the "
               "same shape as `y` but with only 1 point along `axis`.")
        if not (initial.ndim == 0 or initial.shape == one_shape):
            raise ValueError(msg)
        initial = np.broadcast_to(initial, one_shape)
        initial = np.swapaxes(initial, axis, -1)

        # Shift the running integral by the starting value and prepend it.
        out += initial
        out = np.concatenate((initial, out), axis=-1)

    return np.swapaxes(out, -1, axis)
alt_input_dx_shape): + raise ValueError(message) + dx = np.broadcast_to(dx, final_dx_shape) + dx = np.swapaxes(dx, axis, -1) + res = _cumulatively_sum_simpson_integrals( + y, dx, _cumulative_simpson_equal_intervals + ) + + if initial is not None: + initial = _ensure_float_array(initial) + alt_initial_input_shape = tupleset(original_shape, axis, 1) + message = ("If provided, `initial` must either be a scalar or have the " + "same shape as `y` but with only 1 point along `axis`.") + if not (initial.ndim == 0 or initial.shape == alt_initial_input_shape): + raise ValueError(message) + initial = np.broadcast_to(initial, alt_initial_input_shape) + initial = np.swapaxes(initial, axis, -1) + + res += initial + res = np.concatenate((initial, res), axis=-1) + + res = np.swapaxes(res, -1, axis) + return res + + +def romb(y, dx=1.0, axis=-1, show=False): + """ + Romberg integration using samples of a function. + + Parameters + ---------- + y : array_like + A vector of ``2**k + 1`` equally-spaced samples of a function. + dx : float, optional + The sample spacing. Default is 1. + axis : int, optional + The axis along which to integrate. Default is -1 (last axis). + show : bool, optional + When `y` is a single 1-D array, then if this argument is True + print the table showing Richardson extrapolation from the + samples. Default is False. + + Returns + ------- + romb : ndarray + The integrated result for `axis`. 
+ + See Also + -------- + quad : adaptive quadrature using QUADPACK + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + simpson : integrators for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> x = np.arange(10, 14.25, 0.25) + >>> y = np.arange(3, 12) + + >>> integrate.romb(y) + 56.0 + + >>> y = np.sin(np.power(x, 2.5)) + >>> integrate.romb(y) + -0.742561336672229 + + >>> integrate.romb(y, show=True) + Richardson Extrapolation Table for Romberg Integration + ====================================================== + -0.81576 + 4.63862 6.45674 + -1.10581 -3.02062 -3.65245 + -2.57379 -3.06311 -3.06595 -3.05664 + -1.34093 -0.92997 -0.78776 -0.75160 -0.74256 + ====================================================== + -0.742561336672229 # may vary + + """ + y = np.asarray(y) + nd = len(y.shape) + Nsamps = y.shape[axis] + Ninterv = Nsamps-1 + n = 1 + k = 0 + while n < Ninterv: + n <<= 1 + k += 1 + if n != Ninterv: + raise ValueError("Number of samples must be one plus a " + "non-negative power of 2.") + + R = {} + slice_all = (slice(None),) * nd + slice0 = tupleset(slice_all, axis, 0) + slicem1 = tupleset(slice_all, axis, -1) + h = Ninterv * np.asarray(dx, dtype=float) + R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h + slice_R = slice_all + start = stop = step = Ninterv + for i in range(1, k+1): + start >>= 1 + slice_R = tupleset(slice_R, axis, slice(start, stop, step)) + step >>= 1 + R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis)) + for j in range(1, i+1): + prev = R[(i, j-1)] + R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1) + h /= 2.0 + + if show: + if not np.isscalar(R[(0, 0)]): + print("*** Printing table only supported for integrals" + + " of a single data set.") + else: + try: + precis = show[0] + except (TypeError, IndexError): + precis = 5 + try: + width = show[1] + except 
(TypeError, IndexError): + width = 8 + formstr = "%%%d.%df" % (width, precis) + + title = "Richardson Extrapolation Table for Romberg Integration" + print(title, "=" * len(title), sep="\n", end="\n") + for i in range(k+1): + for j in range(i+1): + print(formstr % R[(i, j)], end=" ") + print() + print("=" * len(title)) + + return R[(k, k)] + + +# Coefficients for Newton-Cotes quadrature +# +# These are the points being used +# to construct the local interpolating polynomial +# a are the weights for Newton-Cotes integration +# B is the error coefficient. +# error in these coefficients grows as N gets larger. +# or as samples are closer and closer together + +# You can use maxima to find these rational coefficients +# for equally spaced data using the commands +# a(i,N) := (integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) +# / ((N-i)! * i!) * (-1)^(N-i)); +# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N)); +# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N)); +# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N)); +# +# pre-computed for equally-spaced weights +# +# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N] +# +# a = num_a*array(int_a)/den_a +# B = num_B*1.0 / den_B +# +# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*) +# where k = N // 2 +# +_builtincoeffs = { + 1: (1,2,[1,1],-1,12), + 2: (1,3,[1,4,1],-1,90), + 3: (3,8,[1,3,3,1],-3,80), + 4: (2,45,[7,32,12,32,7],-8,945), + 5: (5,288,[19,75,50,50,75,19],-275,12096), + 6: (1,140,[41,216,27,272,27,216,41],-9,1400), + 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400), + 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989], + -2368,467775), + 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080, + 15741,2857], -4671, 394240), + 10: (5,299376,[16067,106300,-48525,272400,-260550,427368, + -260550,272400,-48525,106300,16067], + -673175, 163459296), + 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542, + 
# Pre-computed rational Newton-Cotes weights for equally spaced data:
#   num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#   a = num_a * array(int_a) / den_a      (quadrature weights)
#   B = num_B / den_B                     (error coefficient)
# so that
#   integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# with k = N // 2.  The error coefficient grows as N gets larger or as
# samples move closer together.
_builtincoeffs = {
    1: (1, 2, [1, 1], -1, 12),
    2: (1, 3, [1, 4, 1], -1, 90),
    3: (3, 8, [1, 3, 3, 1], -3, 80),
    4: (2, 45, [7, 32, 12, 32, 7], -8, 945),
    5: (5, 288, [19, 75, 50, 50, 75, 19], -275, 12096),
    6: (1, 140, [41, 216, 27, 272, 27, 216, 41], -9, 1400),
    7: (7, 17280, [751, 3577, 1323, 2989, 2989, 1323, 3577, 751],
        -8183, 518400),
    8: (4, 14175, [989, 5888, -928, 10496, -4540, 10496, -928, 5888, 989],
        -2368, 467775),
    9: (9, 89600, [2857, 15741, 1080, 19344, 5778, 5778, 19344, 1080,
                   15741, 2857], -4671, 394240),
    10: (5, 299376, [16067, 106300, -48525, 272400, -260550, 427368,
                     -260550, 272400, -48525, 106300, 16067],
         -673175, 163459296),
    11: (11, 87091200, [2171465, 13486539, -3237113, 25226685, -9595542,
                        15493566, 15493566, -9595542, 25226685, -3237113,
                        13486539, 2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651, 9903168, -7587864, 35725120, -51491295,
                      87516288, -87797136, 87516288, -51491295, 35725120,
                      -7587864, 9903168, 1364651], -3012, 875875),
    13: (13, 402361344000, [8181904909, 56280729661, -31268252574,
                            156074417954, -151659573325, 206683437987,
                            -43111992612, -43111992612, 206683437987,
                            -151659573325, 156074417954, -31268252574,
                            56280729661, 8181904909], -2639651053,
         344881152000),
    14: (7, 2501928000, [90241897, 710986864, -770720657, 3501442784,
                         -6625093363, 12630121616, -16802270373, 19534438464,
                         -16802270373, 12630121616, -6625093363, 3501442784,
                         -770720657, 710986864, 90241897], -3740727473,
         1275983280000)
    }


def newton_cotes(rn, equal=0):
    r"""
    Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:

    :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
    + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`

    where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.

    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.

    Parameters
    ----------
    rn : int
        The integer order for equally-spaced data or the relative positions
        of the samples with the first sample at 0 and the last at N, where
        N+1 is the length of `rn`. N is the order of the Newton-Cotes
        integration.
    equal : int, optional
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.

    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.

    Examples
    --------
    >>> from scipy.integrate import newton_cotes
    >>> import numpy as np
    >>> an, B = newton_cotes(2, 1)
    >>> an
    array([0.33333333, 1.33333333, 0.33333333])
    """
    # `rn` may be an order (scalar) or an array of sample positions;
    # len() failing is how a scalar order is detected.
    try:
        N = len(rn)-1
        if equal:
            rn = np.arange(N+1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    except Exception:
        N = rn
        rn = np.arange(N+1)
        equal = 1

    # Equally spaced data with a tabulated order: use the exact rational
    # coefficients.
    if equal and N in _builtincoeffs:
        numer, denom, table, err_num, err_den = _builtincoeffs[N]
        weights = numer * np.array(table, dtype=float) / denom
        return weights, float(err_num)/err_den

    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")

    # Solve for the interpolatory weights on the canonical interval
    # [-1, 1]: the weights satisfy C^T a = moments, where C is the
    # Vandermonde matrix of the mapped nodes.
    frac = rn / float(N)
    ti = 2 * frac - 1
    nvec = np.arange(N+1)
    C = ti ** nvec[:, np.newaxis]
    Cinv = np.linalg.inv(C)
    # Improve precision of the inverse with two Newton iterations.
    for _ in range(2):
        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
    # Moments of the even monomials on [-1, 1] (odd moments vanish).
    moments = 2.0 / (nvec[::2]+1)
    weights = Cinv[:, ::2].dot(moments) * (N / 2.)

    # Error coefficient: even-order equally spaced rules gain one order.
    if (N % 2 == 0) and equal:
        BN = N/(N+3.)
        power = N+2
    else:
        BN = N/(N+2.)
        power = N+1

    BN = BN - np.dot(frac**power, weights)
    p1 = power+1
    # Compute N**power / p1! in log space to avoid overflow.
    fac = power*math.log(N) - gammaln(p1)
    fac = math.exp(fac)
    return weights, BN*fac
Oh well if it's copied twice. + a = np.atleast_1d(a).copy() + b = np.atleast_1d(b).copy() + a, b = np.broadcast_arrays(a, b) + dim = a.shape[0] + + try: + func((a + b) / 2) + except Exception as e: + message = ("`func` must evaluate the integrand at points within " + "the integration range; e.g. `func( (a + b) / 2)` " + "must return the integrand at the centroid of the " + "integration volume.") + raise ValueError(message) from e + + try: + func(np.array([a, b]).T) + vfunc = func + except Exception as e: + message = ("Exception encountered when attempting vectorized call to " + f"`func`: {e}. For better performance, `func` should " + "accept two-dimensional array `x` with shape `(len(a), " + "n_points)` and return an array of the integrand value at " + "each of the `n_points.") + warnings.warn(message, stacklevel=3) + + def vfunc(x): + return np.apply_along_axis(func, axis=-1, arr=x) + + n_points_int = np.int64(n_points) + if n_points != n_points_int: + message = "`n_points` must be an integer." + raise TypeError(message) + + n_estimates_int = np.int64(n_estimates) + if n_estimates != n_estimates_int: + message = "`n_estimates` must be an integer." + raise TypeError(message) + + if qrng is None: + qrng = stats.qmc.Halton(dim) + elif not isinstance(qrng, stats.qmc.QMCEngine): + message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine." + raise TypeError(message) + + if qrng.d != a.shape[0]: + message = ("`qrng` must be initialized with dimensionality equal to " + "the number of variables in `a`, i.e., " + "`qrng.random().shape[-1]` must equal `a.shape[0]`.") + raise ValueError(message) + + rng_seed = getattr(qrng, 'rng_seed', None) + rng = stats._qmc.check_random_state(rng_seed) + + if log not in {True, False}: + message = "`log` must be boolean (`True` or `False`)." 
# Result type for `qmc_quad`: the integral estimate and its standard error.
QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])


def qmc_quad(func, a, b, *, n_estimates=8, n_points=1024, qrng=None,
             log=False):
    """
    Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature.

    Parameters
    ----------
    func : callable
        The integrand. Must accept a single argument ``x``, an array which
        specifies the point(s) at which to evaluate the scalar-valued
        integrand, and return the value(s) of the integrand.
        For efficiency, the function should be vectorized to accept an array of
        shape ``(d, n_points)``, where ``d`` is the number of variables (i.e.
        the dimensionality of the function domain) and `n_points` is the number
        of quadrature points, and return an array of shape ``(n_points,)``,
        the integrand at each quadrature point.
    a, b : array-like
        One-dimensional arrays specifying the lower and upper integration
        limits, respectively, of each of the ``d`` variables.
    n_estimates, n_points : int, optional
        `n_estimates` (default: 8) statistically independent QMC samples, each
        of `n_points` (default: 1024) points, will be generated by `qrng`.
        The total number of points at which the integrand `func` will be
        evaluated is ``n_points * n_estimates``. See Notes for details.
    qrng : `~scipy.stats.qmc.QMCEngine`, optional
        An instance of the QMCEngine from which to sample QMC points.
        The QMCEngine must be initialized to a number of dimensions ``d``
        corresponding with the number of variables ``x1, ..., xd`` passed to
        `func`.
        The provided QMCEngine is used to produce the first integral estimate.
        If `n_estimates` is greater than one, additional QMCEngines are
        spawned from the first (with scrambling enabled, if it is an option.)
        If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton`
        will be initialized with the number of dimensions determined from
        the length of `a`.
    log : boolean, default: False
        When set to True, `func` returns the log of the integrand, and
        the result object contains the log of the integral.

    Returns
    -------
    result : object
        A result object with attributes:

        integral : float
            The estimate of the integral.
        standard_error :
            The error estimate. See Notes for interpretation.

    Notes
    -----
    Values of the integrand at each of the `n_points` points of a QMC sample
    are used to produce an estimate of the integral. This estimate is drawn
    from a population of possible estimates of the integral, the value of
    which we obtain depends on the particular points at which the integral
    was evaluated. We perform this process `n_estimates` times, each time
    evaluating the integrand at different scrambled QMC points, effectively
    drawing i.i.d. random samples from the population of integral estimates.
    The sample mean :math:`m` of these integral estimates is an
    unbiased estimator of the true value of the integral, and the standard
    error of the mean :math:`s` of these estimates may be used to generate
    confidence intervals using the t distribution with ``n_estimates - 1``
    degrees of freedom. Perhaps counter-intuitively, increasing `n_points`
    while keeping the total number of function evaluation points
    ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas
    increasing `n_estimates` tends to decrease the error estimate.

    Examples
    --------
    QMC quadrature is particularly useful for computing integrals in higher
    dimensions. An example integrand is the probability density function
    of a multivariate normal distribution.

    >>> import numpy as np
    >>> from scipy import stats
    >>> dim = 8
    >>> mean = np.zeros(dim)
    >>> cov = np.eye(dim)
    >>> def func(x):
    ...     # `multivariate_normal` expects the _last_ axis to correspond with
    ...     # the dimensionality of the space, so `x` must be transposed
    ...     return stats.multivariate_normal.pdf(x.T, mean, cov)

    To compute the integral over the unit hypercube:

    >>> from scipy.integrate import qmc_quad
    >>> a = np.zeros(dim)
    >>> b = np.ones(dim)
    >>> rng = np.random.default_rng()
    >>> qrng = stats.qmc.Halton(d=dim, seed=rng)
    >>> n_estimates = 8
    >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng)
    >>> res.integral, res.standard_error
    (0.00018429555666024108, 1.0389431116001344e-07)

    A two-sided, 99% confidence interval for the integral may be estimated
    as:

    >>> t = stats.t(df=n_estimates-1, loc=res.integral,
    ...             scale=res.standard_error)
    >>> t.interval(0.99)
    (0.0001839319802536469, 0.00018465913306683527)

    Indeed, the value reported by `scipy.stats.multivariate_normal` is
    within this range.

    >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
    0.00018430867675187443

    """
    args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log)
    func, a, b, n_points, n_estimates, qrng, rng, log, stats = args

    # Each helper below has a linear-space and a log-space branch; in log
    # space, sums of exponentials are computed with `logsumexp`.

    def sum_product(integrands, dA, log=False):
        # Riemann-style sum of integrand values times the volume element.
        if log:
            return logsumexp(integrands) + np.log(dA)
        else:
            return np.sum(integrands * dA)

    def mean(estimates, log=False):
        # Sample mean of the independent integral estimates.
        if log:
            return logsumexp(estimates) - np.log(n_estimates)
        else:
            return np.mean(estimates)

    def std(estimates, m=None, ddof=0, log=False):
        # Sample standard deviation of the estimates.
        m = m or mean(estimates, log)
        if log:
            # Represent -m as log(m) + i*pi so that logsumexp over the pair
            # yields log(estimate - m); doubling that gives the log of the
            # squared deviation, and the outer logsumexp is the log of the
            # (scaled) variance. The result is real, so take the real part.
            estimates, m = np.broadcast_arrays(estimates, m)
            temp = np.vstack((estimates, m + np.pi * 1j))
            diff = logsumexp(temp, axis=0)
            return np.real(0.5 * (logsumexp(2 * diff)
                                  - np.log(n_estimates - ddof)))
        else:
            return np.std(estimates, ddof=ddof)

    def sem(estimates, m=None, s=None, log=False):
        # Standard error of the mean: s / sqrt(n), with ddof=1.
        m = m or mean(estimates, log)
        s = s or std(estimates, m, ddof=1, log=log)
        if log:
            return s - 0.5*np.log(n_estimates)
        else:
            return s / np.sqrt(n_estimates)

    # The sign of the integral depends on the order of the limits. Fix this by
    # ensuring that lower bounds are indeed lower and setting sign of resulting
    # integral manually
    if np.any(a == b):
        message = ("A lower limit was equal to an upper limit, so the value "
                   "of the integral is zero by definition.")
        warnings.warn(message, stacklevel=2)
        return QMCQuadResult(-np.inf if log else 0, 0)

    i_swap = b < a
    sign = (-1)**(i_swap.sum(axis=-1))  # odd # of swaps -> negative
    a[i_swap], b[i_swap] = b[i_swap], a[i_swap]

    # Volume of the integration region and volume element per sample point.
    A = np.prod(b - a)
    dA = A / n_points

    estimates = np.zeros(n_estimates)
    rngs = _rng_spawn(qrng.rng, n_estimates)
    for i in range(n_estimates):
        # Generate integral estimate
        sample = qrng.random(n_points)
        # The rationale for transposing is that this allows users to easily
        # unpack `x` into separate variables, if desired. This is consistent
        # with the `xx` array passed into the `scipy.integrate.nquad` `func`.
        x = stats.qmc.scale(sample, a, b).T  # (n_dim, n_points)
        integrands = func(x)
        estimates[i] = sum_product(integrands, dA, log)

        # Get a new, independently-scrambled QRNG for next time
        qrng = type(qrng)(seed=rngs[i], **qrng._init_quad)

    integral = mean(estimates, log)
    standard_error = sem(estimates, m=integral, log=log)
    # In log space a negative integral is encoded by adding i*pi to the log.
    integral = integral + np.pi*1j if (log and sign < 0) else integral*sign
    return QMCQuadResult(integral, standard_error)
GenzMalikCubature +from ._gauss_kronrod import GaussKronrodQuadrature +from ._gauss_legendre import GaussLegendreQuadrature + +__all__ = [s for s in dir() if not s.startswith('_')] diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d79cd54bf99bed75f320bf0c37c6995e5c050519 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_base.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aaae101d4c5e9f7462e844229c3500a3a991e3b Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_base.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_gauss_kronrod.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_gauss_kronrod.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bb6e3bc5c6b27d54f19047017dccb045795300f Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_gauss_kronrod.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_gauss_legendre.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/__pycache__/_gauss_legendre.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d575b4e525ddc6c38ad3a4b6b0a5b0f63b9bb83 Binary files /dev/null and 
class Rule:
    """
    Base class for numerical integration algorithms (cubatures).

    Finds an estimate for the integral of ``f`` over the region described by two arrays
    ``a`` and ``b`` via `estimate`, and find an estimate for the error of this
    approximation via `estimate_error`.

    If a subclass does not implement its own `estimate_error`, then it will use a
    default error estimate based on the difference between the estimate over the whole
    region and the sum of estimates over that region divided into ``2^ndim`` subregions.

    See Also
    --------
    FixedRule

    Examples
    --------
    In the following, a custom rule is created which uses 3D Genz-Malik cubature for
    the estimate of the integral, and the difference between this estimate and a less
    accurate estimate using 21-node Gauss-Kronrod quadrature as an estimate for the
    error.

    >>> import numpy as np
    >>> from scipy.integrate import cubature
    >>> from scipy.integrate._rules import (
    ...     Rule, ProductNestedFixed, GenzMalikCubature, GaussKronrodQuadrature
    ... )
    >>> def f(x, r, alphas):
    ...     # f(x) = cos(2*pi*r + alpha @ x)
    ...     # Need to allow r and alphas to be arbitrary shape
    ...     npoints, ndim = x.shape[0], x.shape[-1]
    ...     alphas_reshaped = alphas[np.newaxis, :]
    ...     x_reshaped = x.reshape(npoints, *([1]*(len(alphas.shape) - 1)), ndim)
    ...     return np.cos(2*np.pi*r + np.sum(alphas_reshaped * x_reshaped, axis=-1))
    >>> genz = GenzMalikCubature(ndim=3)
    >>> gauss = GaussKronrodQuadrature(npoints=21)
    >>> # Gauss-Kronrod is 1D, so we find the 3D product rule:
    >>> gauss_3d = ProductNestedFixed([gauss, gauss, gauss])
    >>> class CustomRule(Rule):
    ...     def estimate(self, f, a, b, args=()):
    ...         return genz.estimate(f, a, b, args)
    ...     def estimate_error(self, f, a, b, args=()):
    ...         return np.abs(
    ...             genz.estimate(f, a, b, args)
    ...             - gauss_3d.estimate(f, a, b, args)
    ...         )
    >>> rng = np.random.default_rng()
    >>> res = cubature(
    ...     f=f,
    ...     a=np.array([0, 0, 0]),
    ...     b=np.array([1, 1, 1]),
    ...     rule=CustomRule(),
    ...     args=(rng.random((2,)), rng.random((3, 2, 3)))
    ... )
    >>> res.estimate
     array([[-0.95179502,  0.12444608],
            [-0.96247411,  0.60866385],
            [-0.97360014,  0.25515587]])
    """

    def estimate(self, f, a, b, args=()):
        r"""
        Calculate estimate of integral of `f` in rectangular region described by
        corners `a` and ``b``.

        Parameters
        ----------
        f : callable
            Function to integrate. `f` must have the signature::
                f(x : ndarray, \*args) -> ndarray

            `f` should accept arrays ``x`` of shape::
                (npoints, ndim)

            and output arrays of shape::
                (npoints, output_dim_1, ..., output_dim_n)

            In this case, `estimate` will return arrays of shape::
                (output_dim_1, ..., output_dim_n)
        a, b : ndarray
            Lower and upper limits of integration as rank-1 arrays specifying the left
            and right endpoints of the intervals being integrated over. Infinite limits
            are currently not supported.
        args : tuple, optional
            Additional positional args passed to ``f``, if any.

        Returns
        -------
        est : ndarray
            Result of estimation. If `f` returns arrays of shape ``(npoints,
            output_dim_1, ..., output_dim_n)``, then `est` will be of shape
            ``(output_dim_1, ..., output_dim_n)``.
        """
        raise NotImplementedError

    def estimate_error(self, f, a, b, args=()):
        r"""
        Estimate the error of the approximation for the integral of `f` in rectangular
        region described by corners `a` and `b`.

        If a subclass does not override this method, then a default error estimator is
        used. This estimates the error as ``|est - refined_est|`` where ``est`` is
        ``estimate(f, a, b)`` and ``refined_est`` is the sum of
        ``estimate(f, a_k, b_k)`` where ``a_k, b_k`` are the coordinates of each
        subregion of the region described by ``a`` and ``b``. In the 1D case, this
        is equivalent to comparing the integral over an entire interval ``[a, b]`` to
        the sum of the integrals over the left and right subintervals, ``[a, (a+b)/2]``
        and ``[(a+b)/2, b]``.

        Parameters
        ----------
        f : callable
            Function to estimate error for. `f` must have the signature::
                f(x : ndarray, \*args) -> ndarray

            `f` should accept arrays `x` of shape::
                (npoints, ndim)

            and output arrays of shape::
                (npoints, output_dim_1, ..., output_dim_n)

            In this case, `estimate` will return arrays of shape::
                (output_dim_1, ..., output_dim_n)
        a, b : ndarray
            Lower and upper limits of integration as rank-1 arrays specifying the left
            and right endpoints of the intervals being integrated over. Infinite limits
            are currently not supported.
        args : tuple, optional
            Additional positional args passed to `f`, if any.

        Returns
        -------
        err_est : ndarray
            Result of error estimation. If `f` returns arrays of shape
            ``(npoints, output_dim_1, ..., output_dim_n)``, then `est` will be
            of shape ``(output_dim_1, ..., output_dim_n)``.
        """

        est = self.estimate(f, a, b, args)
        refined_est = 0

        # BUG FIX: `_split_subregion` takes the array namespace as its third
        # argument; the previous call `_split_subregion(a, b)` omitted it and
        # raised TypeError. `self.xp` is set by concrete subclasses
        # (e.g. FixedRule) before this method can be reached.
        for a_k, b_k in _split_subregion(a, b, self.xp):
            refined_est += self.estimate(f, a_k, b_k, args)

        return self.xp.abs(est - refined_est)
return (nodes, weights) + >>> rule = SimpsonsQuad() + >>> rule.estimate( + ... f=lambda x: x**2, + ... a=np.array([0]), + ... b=np.array([1]), + ... ) + [0.3333333] + """ + + def __init__(self): + self.xp = None + + @property + def nodes_and_weights(self): + raise NotImplementedError + + def estimate(self, f, a, b, args=()): + r""" + Calculate estimate of integral of `f` in rectangular region described by + corners `a` and `b` as ``sum(weights * f(nodes))``. + + Nodes and weights will automatically be adjusted from calculating integrals over + :math:`[-1, 1]^n` to :math:`[a, b]^n`. + + Parameters + ---------- + f : callable + Function to integrate. `f` must have the signature:: + f(x : ndarray, \*args) -> ndarray + + `f` should accept arrays `x` of shape:: + (npoints, ndim) + + and output arrays of shape:: + (npoints, output_dim_1, ..., output_dim_n) + + In this case, `estimate` will return arrays of shape:: + (output_dim_1, ..., output_dim_n) + a, b : ndarray + Lower and upper limits of integration as rank-1 arrays specifying the left + and right endpoints of the intervals being integrated over. Infinite limits + are currently not supported. + args : tuple, optional + Additional positional args passed to `f`, if any. + + Returns + ------- + est : ndarray + Result of estimation. If `f` returns arrays of shape ``(npoints, + output_dim_1, ..., output_dim_n)``, then `est` will be of shape + ``(output_dim_1, ..., output_dim_n)``. + """ + nodes, weights = self.nodes_and_weights + + if self.xp is None: + self.xp = array_namespace(nodes) + + return _apply_fixed_rule(f, a, b, nodes, weights, args, self.xp) + + +class NestedFixedRule(FixedRule): + r""" + A cubature rule with error estimate given by the difference between two underlying + fixed rules. 
class NestedFixedRule(FixedRule):
    r"""
    A cubature rule whose error estimate is the difference of two fixed rules.

    Constructed as ``NestedFixedRule(higher, lower)``, it behaves as::

        estimate(f, a, b)       := higher.estimate(f, a, b)
        estimate_error(f, a, b) := |higher.estimate(f, a, b)
                                    - lower.estimate(f, a, b)|

    with the absolute value taken elementwise.

    Attributes
    ----------
    higher : Rule
        Higher accuracy rule.

    lower : Rule
        Lower accuracy rule.

    See Also
    --------
    GaussKronrodQuadrature

    Examples
    --------

    >>> from scipy.integrate import cubature
    >>> from scipy.integrate._rules import (
    ...     GaussLegendreQuadrature, NestedFixedRule, ProductNestedFixed
    ... )
    >>> higher = GaussLegendreQuadrature(10)
    >>> lower = GaussLegendreQuadrature(5)
    >>> rule = NestedFixedRule(higher, lower)
    >>> rule_2d = ProductNestedFixed([rule, rule])
    """

    def __init__(self, higher, lower):
        self.higher = higher
        self.lower = lower
        self.xp = None

    @property
    def nodes_and_weights(self):
        # Delegate to the higher-accuracy rule, which provides the estimate.
        if self.higher is None:
            raise NotImplementedError
        return self.higher.nodes_and_weights

    @property
    def lower_nodes_and_weights(self):
        if self.lower is None:
            raise NotImplementedError
        return self.lower.nodes_and_weights

    def estimate_error(self, f, a, b, args=()):
        r"""
        Estimate the error of the approximation for the integral of `f` over
        the box with corners `a` and `b`.

        Parameters
        ----------
        f : callable
            Function to estimate error for. `f` must have the signature::
                f(x : ndarray, \*args) -> ndarray

            `f` should accept arrays `x` of shape ``(npoints, ndim)`` and
            return arrays of shape ``(npoints, output_dim_1, ...,
            output_dim_n)``, in which case `estimate` returns arrays of shape
            ``(output_dim_1, ..., output_dim_n)``.
        a, b : ndarray
            Lower and upper limits of integration as rank-1 arrays specifying
            the left and right endpoints of the intervals being integrated
            over. Infinite limits are currently not supported.
        args : tuple, optional
            Additional positional args passed to `f`, if any.

        Returns
        -------
        err_est : ndarray
            Result of error estimation, of shape ``(output_dim_1, ...,
            output_dim_n)`` when `f` returns ``(npoints, output_dim_1, ...,
            output_dim_n)`` arrays.
        """

        nodes, weights = self.nodes_and_weights
        lower_nodes, lower_weights = self.lower_nodes_and_weights

        if self.xp is None:
            self.xp = array_namespace(nodes)

        # Evaluate |higher - lower| in a single pass: concatenate both node
        # sets and negate the lower rule's weights so the weighted sum is the
        # difference of the two estimates.
        err_nodes = self.xp.concat([nodes, lower_nodes], axis=0)
        err_weights = self.xp.concat([weights, -lower_weights], axis=0)

        difference = _apply_fixed_rule(f, a, b, err_nodes, err_weights, args,
                                       self.xp)
        return self.xp.abs(difference)
[GaussKronrodQuadrature(15), GaussKronrodQuadrature(15)] + ... ) # Use 15-point Gauss-Kronrod, which implements NestedFixedRule + >>> a, b = np.array([0, 0]), np.array([1, 1]) + >>> rule.estimate(f, a, b) # True value 2*sin(1), approximately 1.6829 + np.float64(1.682941969615793) + >>> rule.estimate_error(f, a, b) + np.float64(2.220446049250313e-16) + """ + + def __init__(self, base_rules): + for rule in base_rules: + if not isinstance(rule, NestedFixedRule): + raise ValueError("base rules for product need to be instance of" + "NestedFixedRule") + + self.base_rules = base_rules + self.xp = None + + @cached_property + def nodes_and_weights(self): + nodes = _cartesian_product( + [rule.nodes_and_weights[0] for rule in self.base_rules] + ) + + if self.xp is None: + self.xp = array_namespace(nodes) + + weights = self.xp.prod( + _cartesian_product( + [rule.nodes_and_weights[1] for rule in self.base_rules] + ), + axis=-1, + ) + + return nodes, weights + + @cached_property + def lower_nodes_and_weights(self): + nodes = _cartesian_product( + [cubature.lower_nodes_and_weights[0] for cubature in self.base_rules] + ) + + if self.xp is None: + self.xp = array_namespace(nodes) + + weights = self.xp.prod( + _cartesian_product( + [cubature.lower_nodes_and_weights[1] for cubature in self.base_rules] + ), + axis=-1, + ) + + return nodes, weights + + +def _cartesian_product(arrays): + xp = array_namespace(*arrays) + + arrays_ix = xp.meshgrid(*arrays, indexing='ij') + result = xp.reshape(xp.stack(arrays_ix, axis=-1), (-1, len(arrays))) + + return result + + +def _split_subregion(a, b, xp, split_at=None): + """ + Given the coordinates of a region like a=[0, 0] and b=[1, 1], yield the coordinates + of all subregions, which in this case would be:: + + ([0, 0], [1/2, 1/2]), + ([0, 1/2], [1/2, 1]), + ([1/2, 0], [1, 1/2]), + ([1/2, 1/2], [1, 1]) + """ + xp = array_namespace(a, b) + + if split_at is None: + split_at = (a + b) / 2 + + left = [xp.asarray([a[i], split_at[i]]) for i in 
range(a.shape[0])] + right = [xp.asarray([split_at[i], b[i]]) for i in range(b.shape[0])] + + a_sub = _cartesian_product(left) + b_sub = _cartesian_product(right) + + for i in range(a_sub.shape[0]): + yield a_sub[i, ...], b_sub[i, ...] + + +def _apply_fixed_rule(f, a, b, orig_nodes, orig_weights, args, xp): + # Downcast nodes and weights to common dtype of a and b + result_dtype = a.dtype + orig_nodes = xp.astype(orig_nodes, result_dtype) + orig_weights = xp.astype(orig_weights, result_dtype) + + # Ensure orig_nodes are at least 2D, since 1D cubature methods can return arrays of + # shape (npoints,) rather than (npoints, 1) + if orig_nodes.ndim == 1: + orig_nodes = orig_nodes[:, None] + + rule_ndim = orig_nodes.shape[-1] + + a_ndim = xp_size(a) + b_ndim = xp_size(b) + + if rule_ndim != a_ndim or rule_ndim != b_ndim: + raise ValueError(f"rule and function are of incompatible dimension, nodes have" + f"ndim {rule_ndim}, while limit of integration has ndim" + f"a_ndim={a_ndim}, b_ndim={b_ndim}") + + lengths = b - a + + # The underlying rule is for the hypercube [-1, 1]^n. + # + # To handle arbitrary regions of integration, it's necessary to apply a linear + # change of coordinates to map each interval [a[i], b[i]] to [-1, 1]. + nodes = (orig_nodes + 1) * (lengths * 0.5) + a + + # Also need to multiply the weights by a scale factor equal to the determinant + # of the Jacobian for this coordinate change. 
+ weight_scale_factor = xp.prod(lengths, dtype=result_dtype) / 2**rule_ndim + weights = orig_weights * weight_scale_factor + + f_nodes = f(nodes, *args) + weights_reshaped = xp.reshape(weights, (-1, *([1] * (f_nodes.ndim - 1)))) + + # f(nodes) will have shape (num_nodes, output_dim_1, ..., output_dim_n) + # Summing along the first axis means estimate will shape (output_dim_1, ..., + # output_dim_n) + est = xp.sum(weights_reshaped * f_nodes, axis=0, dtype=result_dtype) + + return est diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/_gauss_kronrod.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/_gauss_kronrod.py new file mode 100644 index 0000000000000000000000000000000000000000..b2a3518c55cf49cd14c777d243ea7e93a489f86c --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/_gauss_kronrod.py @@ -0,0 +1,202 @@ +from scipy._lib._array_api import np_compat, array_namespace + +from functools import cached_property + +from ._base import NestedFixedRule +from ._gauss_legendre import GaussLegendreQuadrature + + +class GaussKronrodQuadrature(NestedFixedRule): + """ + Gauss-Kronrod quadrature. + + Gauss-Kronrod rules consist of two quadrature rules, one higher-order and one + lower-order. The higher-order rule is used as the estimate of the integral and the + difference between them is used as an estimate for the error. + + Gauss-Kronrod is a 1D rule. To use it for multidimensional integrals, it will be + necessary to use ProductNestedFixed and multiple Gauss-Kronrod rules. See Examples. + + For n-node Gauss-Kronrod, the lower-order rule has ``n//2`` nodes, which are the + ordinary Gauss-Legendre nodes with corresponding weights. The higher-order rule has + ``n`` nodes, ``n//2`` of which are the same as the lower-order rule and the + remaining nodes are the Kronrod extension of those nodes. + + Parameters + ---------- + npoints : int + Number of nodes for the higher-order rule. 
class GaussKronrodQuadrature(NestedFixedRule):
    """
    Gauss-Kronrod quadrature.

    Gauss-Kronrod rules consist of two quadrature rules, one higher-order and one
    lower-order. The higher-order rule is used as the estimate of the integral and the
    difference between them is used as an estimate for the error.

    Gauss-Kronrod is a 1D rule. To use it for multidimensional integrals, it will be
    necessary to use ProductNestedFixed and multiple Gauss-Kronrod rules. See Examples.

    For n-node Gauss-Kronrod, the lower-order rule has ``n//2`` nodes, which are the
    ordinary Gauss-Legendre nodes with corresponding weights. The higher-order rule has
    ``n`` nodes, ``n//2`` of which are the same as the lower-order rule and the
    remaining nodes are the Kronrod extension of those nodes.

    Parameters
    ----------
    npoints : int
        Number of nodes for the higher-order rule.

    xp : array_namespace, optional
        The namespace for the node and weight arrays. Default is None, where NumPy is
        used.

    Attributes
    ----------
    lower : Rule
        Lower-order rule.

    References
    ----------
    .. [1] R. Piessens, E. de Doncker, Quadpack: A Subroutine Package for Automatic
        Integration, files: dqk21.f, dqk15.f (1983).

    Examples
    --------
    Evaluate a 1D integral. Note in this example that ``f`` returns an array, so the
    estimates will also be arrays, despite the fact that this is a 1D problem.

    >>> import numpy as np
    >>> from scipy.integrate import cubature
    >>> from scipy.integrate._rules import GaussKronrodQuadrature
    >>> def f(x):
    ...     return np.cos(x)
    >>> rule = GaussKronrodQuadrature(21)  # Use 21-point GaussKronrod
    >>> a, b = np.array([0]), np.array([1])
    >>> rule.estimate(f, a, b)  # True value sin(1), approximately 0.84147
     array([0.84147098])
    >>> rule.estimate_error(f, a, b)
     array([1.11022302e-16])

    Evaluate a 2D integral. Note that in this example ``f`` returns a float, so the
    estimates will also be floats.

    >>> import numpy as np
    >>> from scipy.integrate import cubature
    >>> from scipy.integrate._rules import (
    ...     ProductNestedFixed, GaussKronrodQuadrature
    ... )
    >>> def f(x):
    ...     # f(x) = cos(x_1) + cos(x_2)
    ...     return np.sum(np.cos(x), axis=-1)
    >>> rule = ProductNestedFixed(
    ...     [GaussKronrodQuadrature(15), GaussKronrodQuadrature(15)]
    ... )  # Use 15-point Gauss-Kronrod
    >>> a, b = np.array([0, 0]), np.array([1, 1])
    >>> rule.estimate(f, a, b)  # True value 2*sin(1), approximately 1.6829
     np.float64(1.682941969615793)
    >>> rule.estimate_error(f, a, b)
     np.float64(2.220446049250313e-16)
    """

    def __init__(self, npoints, xp=None):
        # TODO: nodes and weights are currently hard-coded for values 15 and 21, but in
        # the future it would be best to compute the Kronrod extension of the lower rule
        if npoints != 15 and npoints != 21:
            # BUG FIX: the implicitly-concatenated message previously read
            # "...currently onlysupported..." (missing space).
            raise NotImplementedError("Gauss-Kronrod quadrature is currently "
                                      "only supported for 15 or 21 nodes")

        self.npoints = npoints

        if xp is None:
            xp = np_compat

        self.xp = array_namespace(xp.empty(0))

        # Embedded lower-order rule: plain Gauss-Legendre with n//2 nodes.
        self.gauss = GaussLegendreQuadrature(npoints//2, xp=self.xp)

    @cached_property
    def nodes_and_weights(self):
        # These values are from QUADPACK's `dqk21.f` and `dqk15.f` (1983).
        if self.npoints == 21:
            nodes = self.xp.asarray(
                [
                    0.995657163025808080735527280689003,
                    0.973906528517171720077964012084452,
                    0.930157491355708226001207180059508,
                    0.865063366688984510732096688423493,
                    0.780817726586416897063717578345042,
                    0.679409568299024406234327365114874,
                    0.562757134668604683339000099272694,
                    0.433395394129247190799265943165784,
                    0.294392862701460198131126603103866,
                    0.148874338981631210884826001129720,
                    0,
                    -0.148874338981631210884826001129720,
                    -0.294392862701460198131126603103866,
                    -0.433395394129247190799265943165784,
                    -0.562757134668604683339000099272694,
                    -0.679409568299024406234327365114874,
                    -0.780817726586416897063717578345042,
                    -0.865063366688984510732096688423493,
                    -0.930157491355708226001207180059508,
                    -0.973906528517171720077964012084452,
                    -0.995657163025808080735527280689003,
                ],
                dtype=self.xp.float64,
            )

            weights = self.xp.asarray(
                [
                    0.011694638867371874278064396062192,
                    0.032558162307964727478818972459390,
                    0.054755896574351996031381300244580,
                    0.075039674810919952767043140916190,
                    0.093125454583697605535065465083366,
                    0.109387158802297641899210590325805,
                    0.123491976262065851077958109831074,
                    0.134709217311473325928054001771707,
                    0.142775938577060080797094273138717,
                    0.147739104901338491374841515972068,
                    0.149445554002916905664936468389821,
                    0.147739104901338491374841515972068,
                    0.142775938577060080797094273138717,
                    0.134709217311473325928054001771707,
                    0.123491976262065851077958109831074,
                    0.109387158802297641899210590325805,
                    0.093125454583697605535065465083366,
                    0.075039674810919952767043140916190,
                    0.054755896574351996031381300244580,
                    0.032558162307964727478818972459390,
                    0.011694638867371874278064396062192,
                ],
                dtype=self.xp.float64,
            )
        elif self.npoints == 15:
            nodes = self.xp.asarray(
                [
                    0.991455371120812639206854697526329,
                    0.949107912342758524526189684047851,
                    0.864864423359769072789712788640926,
                    0.741531185599394439863864773280788,
                    0.586087235467691130294144838258730,
                    0.405845151377397166906606412076961,
                    0.207784955007898467600689403773245,
                    0.000000000000000000000000000000000,
                    -0.207784955007898467600689403773245,
                    -0.405845151377397166906606412076961,
                    -0.586087235467691130294144838258730,
                    -0.741531185599394439863864773280788,
                    -0.864864423359769072789712788640926,
                    -0.949107912342758524526189684047851,
                    -0.991455371120812639206854697526329,
                ],
                dtype=self.xp.float64,
            )

            weights = self.xp.asarray(
                [
                    0.022935322010529224963732008058970,
                    0.063092092629978553290700663189204,
                    0.104790010322250183839876322541518,
                    0.140653259715525918745189590510238,
                    0.169004726639267902826583426598550,
                    0.190350578064785409913256402421014,
                    0.204432940075298892414161999234649,
                    0.209482141084727828012999174891714,
                    0.204432940075298892414161999234649,
                    0.190350578064785409913256402421014,
                    0.169004726639267902826583426598550,
                    0.140653259715525918745189590510238,
                    0.104790010322250183839876322541518,
                    0.063092092629978553290700663189204,
                    0.022935322010529224963732008058970,
                ],
                dtype=self.xp.float64,
            )

        return nodes, weights

    @property
    def lower_nodes_and_weights(self):
        return self.gauss.nodes_and_weights
return np.cos(x) + >>> rule = GaussLegendreQuadrature(21) # Use 21-point GaussLegendre + >>> a, b = np.array([0]), np.array([1]) + >>> rule.estimate(f, a, b) # True value sin(1), approximately 0.84147 + array([0.84147098]) + >>> rule.estimate_error(f, a, b) + array([1.11022302e-16]) + """ + + def __init__(self, npoints, xp=None): + if npoints < 2: + raise ValueError( + "At least 2 nodes required for Gauss-Legendre cubature" + ) + + self.npoints = npoints + + if xp is None: + xp = np_compat + + self.xp = array_namespace(xp.empty(0)) + + @cached_property + def nodes_and_weights(self): + # TODO: current converting to/from numpy + nodes, weights = roots_legendre(self.npoints) + + return ( + self.xp.asarray(nodes, dtype=self.xp.float64), + self.xp.asarray(weights, dtype=self.xp.float64) + ) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/_genz_malik.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/_genz_malik.py new file mode 100644 index 0000000000000000000000000000000000000000..4873805e3364b10a3366de47c15fe3c4b306e5d6 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_rules/_genz_malik.py @@ -0,0 +1,210 @@ +import math +import itertools + +from functools import cached_property + +from scipy._lib._array_api import array_namespace, np_compat + +from scipy.integrate._rules import NestedFixedRule + + +class GenzMalikCubature(NestedFixedRule): + """ + Genz-Malik cubature. + + Genz-Malik is only defined for integrals of dimension >= 2. + + Parameters + ---------- + ndim : int + The spatial dimension of the integrand. + + xp : array_namespace, optional + The namespace for the node and weight arrays. Default is None, where NumPy is + used. + + Attributes + ---------- + higher : Cubature + Higher-order rule. + + lower : Cubature + Lower-order rule. + + References + ---------- + .. [1] A.C. Genz, A.A. 
Malik, Remarks on algorithm 006: An adaptive algorithm for + numerical integration over an N-dimensional rectangular region, Journal of + Computational and Applied Mathematics, Volume 6, Issue 4, 1980, Pages 295-302, + ISSN 0377-0427, https://doi.org/10.1016/0771-050X(80)90039-X. + + Examples + -------- + Evaluate a 3D integral: + + >>> import numpy as np + >>> from scipy.integrate import cubature + >>> from scipy.integrate._rules import GenzMalikCubature + >>> def f(x): + ... # f(x) = cos(x_1) + cos(x_2) + cos(x_3) + ... return np.sum(np.cos(x), axis=-1) + >>> rule = GenzMalikCubature(3) # Use 3D Genz-Malik + >>> a, b = np.array([0, 0, 0]), np.array([1, 1, 1]) + >>> rule.estimate(f, a, b) # True value 3*sin(1), approximately 2.5244 + np.float64(2.5244129547230862) + >>> rule.estimate_error(f, a, b) + np.float64(1.378269656626685e-06) + """ + + def __init__(self, ndim, degree=7, lower_degree=5, xp=None): + if ndim < 2: + raise ValueError("Genz-Malik cubature is only defined for ndim >= 2") + + if degree != 7 or lower_degree != 5: + raise NotImplementedError("Genz-Malik cubature is currently only supported" + "for degree=7, lower_degree=5") + + self.ndim = ndim + self.degree = degree + self.lower_degree = lower_degree + + if xp is None: + xp = np_compat + + self.xp = array_namespace(xp.empty(0)) + + @cached_property + def nodes_and_weights(self): + # TODO: Currently only support for degree 7 Genz-Malik cubature, should aim to + # support arbitrary degree + l_2 = math.sqrt(9/70) + l_3 = math.sqrt(9/10) + l_4 = math.sqrt(9/10) + l_5 = math.sqrt(9/19) + + its = itertools.chain( + [(0,) * self.ndim], + _distinct_permutations((l_2,) + (0,) * (self.ndim - 1)), + _distinct_permutations((-l_2,) + (0,) * (self.ndim - 1)), + _distinct_permutations((l_3,) + (0,) * (self.ndim - 1)), + _distinct_permutations((-l_3,) + (0,) * (self.ndim - 1)), + _distinct_permutations((l_4, l_4) + (0,) * (self.ndim - 2)), + _distinct_permutations((l_4, -l_4) + (0,) * (self.ndim - 2)), + 
_distinct_permutations((-l_4, -l_4) + (0,) * (self.ndim - 2)), + itertools.product((l_5, -l_5), repeat=self.ndim), + ) + + nodes_size = 1 + (2 * (self.ndim + 1) * self.ndim) + 2**self.ndim + + nodes = self.xp.asarray( + list(zip(*its)), + dtype=self.xp.float64, + ) + + nodes = self.xp.reshape(nodes, (self.ndim, nodes_size)) + + # It's convenient to generate the nodes as a sequence of evaluation points + # as an array of shape (npoints, ndim), but nodes needs to have shape + # (ndim, npoints) + nodes = nodes.T + + w_1 = ( + (2**self.ndim) * (12824 - 9120*self.ndim + (400 * self.ndim**2)) / 19683 + ) + w_2 = (2**self.ndim) * 980/6561 + w_3 = (2**self.ndim) * (1820 - 400 * self.ndim) / 19683 + w_4 = (2**self.ndim) * (200 / 19683) + w_5 = 6859 / 19683 + + weights = self.xp.concat([ + self.xp.asarray([w_1] * 1, dtype=self.xp.float64), + self.xp.asarray([w_2] * (2 * self.ndim), dtype=self.xp.float64), + self.xp.asarray([w_3] * (2 * self.ndim), dtype=self.xp.float64), + self.xp.asarray( + [w_4] * (2 * (self.ndim - 1) * self.ndim), + dtype=self.xp.float64, + ), + self.xp.asarray([w_5] * (2**self.ndim), dtype=self.xp.float64), + ]) + + return nodes, weights + + @cached_property + def lower_nodes_and_weights(self): + # TODO: Currently only support for the degree 5 lower rule, in the future it + # would be worth supporting arbitrary degree + + # Nodes are almost the same as the full rule, but there are no nodes + # corresponding to l_5. 
+        l_2 = math.sqrt(9/70)
+        l_3 = math.sqrt(9/10)
+        l_4 = math.sqrt(9/10)
+
+        its = itertools.chain(
+            [(0,) * self.ndim],
+            _distinct_permutations((l_2,) + (0,) * (self.ndim - 1)),
+            _distinct_permutations((-l_2,) + (0,) * (self.ndim - 1)),
+            _distinct_permutations((l_3,) + (0,) * (self.ndim - 1)),
+            _distinct_permutations((-l_3,) + (0,) * (self.ndim - 1)),
+            _distinct_permutations((l_4, l_4) + (0,) * (self.ndim - 2)),
+            _distinct_permutations((l_4, -l_4) + (0,) * (self.ndim - 2)),
+            _distinct_permutations((-l_4, -l_4) + (0,) * (self.ndim - 2)),
+        )
+
+        nodes_size = 1 + (2 * (self.ndim + 1) * self.ndim)
+
+        nodes = self.xp.asarray(list(zip(*its)), dtype=self.xp.float64)
+        nodes = self.xp.reshape(nodes, (self.ndim, nodes_size))
+        nodes = nodes.T
+
+        # Weights are different from those in the full rule.
+        w_1 = (2**self.ndim) * (729 - 950*self.ndim + 50*self.ndim**2) / 729
+        w_2 = (2**self.ndim) * (245 / 486)
+        w_3 = (2**self.ndim) * (265 - 100*self.ndim) / 1458
+        w_4 = (2**self.ndim) * (25 / 729)
+
+        weights = self.xp.concat([
+            self.xp.asarray([w_1] * 1, dtype=self.xp.float64),
+            self.xp.asarray([w_2] * (2 * self.ndim), dtype=self.xp.float64),
+            self.xp.asarray([w_3] * (2 * self.ndim), dtype=self.xp.float64),
+            self.xp.asarray(
+                [w_4] * (2 * (self.ndim - 1) * self.ndim),
+                dtype=self.xp.float64,
+            ),
+        ])
+
+        return nodes, weights
+
+
+def _distinct_permutations(iterable):
+    """
+    Yield the distinct permutations of the elements of `iterable`.
+    """
+
+    # Algorithm: https://w.wiki/Qai
+
+    items = sorted(iterable)
+    size = len(items)
+
+    while True:
+        # Yield the permutation we have
+        yield tuple(items)
+
+        # Find the largest index i such that A[i] < A[i + 1]
+        for i in range(size - 2, -1, -1):
+            if items[i] < items[i + 1]:
+                break
+
+        # If no such index exists, this permutation is the last one
+        else:
+            return
+
+        # Find the largest index j greater than i such that A[i] < A[j]
+        for j in range(size - 1, i, -1):
+            if items[i] < items[j]:
+                break
+
+        # Swap the value of A[i] with that of A[j], then reverse the
+        # sequence from A[i + 1] to form the new permutation
+        items[i], items[j] = items[j], items[i]
+        items[i+1:] = items[:i-size:-1]  # A[i + 1:][::-1]
diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0b303ef690fac6618ac00dde586b5cbab68fbc3
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py
@@ -0,0 +1,1382 @@
+# mypy: disable-error-code="attr-defined"
+import math
+import numpy as np
+from scipy import special
+import scipy._lib._elementwise_iterative_method as eim
+from scipy._lib._util import _RichResult
+from scipy._lib._array_api import (array_namespace, xp_copy, xp_ravel,
+                                   xp_real, xp_take_along_axis)
+
+
+__all__ = ['nsum']
+
+
+# todo:
+#  figure out warning situation
+#  address https://github.com/scipy/scipy/pull/18650#discussion_r1233032521
+#  without `minweight`, we are also suppressing infinities within the interval.
+#    Is that OK? If so, we can probably get rid of `status=3`.
+#  Add heuristic to stop when improvement is too slow / antithrashing
+#  support singularities? interval subdivision? this feature will be added
+#    eventually, but do we adjust the interface now?
+#  When doing log-integration, should the tolerances control the error of the
+#    log-integral or the error of the integral?  The trouble is that `log`
+#    inherently loses some precision so it may not be possible to refine
+#    the integral further.  Example: 7th moment of stats.f(15, 20)
+#  respect function evaluation limit?
+#  make public?
+
+
+def tanhsinh(f, a, b, *, args=(), log=False, maxlevel=None, minlevel=2,
+             atol=None, rtol=None, preserve_shape=False, callback=None):
+    """Evaluate a convergent integral numerically using tanh-sinh quadrature.
+
+    In practice, tanh-sinh quadrature achieves quadratic convergence for
+    many integrands: the number of accurate *digits* scales roughly linearly
+    with the number of function evaluations [1]_.
+
+    Either or both of the limits of integration may be infinite, and
+    singularities at the endpoints are acceptable. Divergent integrals and
+    integrands with non-finite derivatives or singularities within an interval
+    are out of scope, but the latter may be evaluated by calling `tanhsinh` on
+    each sub-interval separately.
+
+    Parameters
+    ----------
+    f : callable
+        The function to be integrated. The signature must be::
+
+            f(xi: ndarray, *argsi) -> ndarray
+
+        where each element of ``xi`` is a finite real number and ``argsi`` is a tuple,
+        which may contain an arbitrary number of arrays that are broadcastable
+        with ``xi``. `f` must be an elementwise function: see documentation of parameter
+        `preserve_shape` for details. It must not mutate the array ``xi`` or the arrays
+        in ``argsi``.
+        If ``f`` returns a value with complex dtype when evaluated at
+        either endpoint, subsequent arguments ``x`` will have complex dtype
+        (but zero imaginary part).
+    a, b : float array_like
+        Real lower and upper limits of integration. Must be broadcastable with one
+        another and with arrays in `args`. Elements may be infinite.
+    args : tuple of array_like, optional
+        Additional positional array arguments to be passed to `f`.
Arrays + must be broadcastable with one another and the arrays of `a` and `b`. + If the callable for which the root is desired requires arguments that are + not broadcastable with `x`, wrap that callable with `f` such that `f` + accepts only `x` and broadcastable ``*args``. + log : bool, default: False + Setting to True indicates that `f` returns the log of the integrand + and that `atol` and `rtol` are expressed as the logs of the absolute + and relative errors. In this case, the result object will contain the + log of the integral and error. This is useful for integrands for which + numerical underflow or overflow would lead to inaccuracies. + When ``log=True``, the integrand (the exponential of `f`) must be real, + but it may be negative, in which case the log of the integrand is a + complex number with an imaginary part that is an odd multiple of π. + maxlevel : int, default: 10 + The maximum refinement level of the algorithm. + + At the zeroth level, `f` is called once, performing 16 function + evaluations. At each subsequent level, `f` is called once more, + approximately doubling the number of function evaluations that have + been performed. Accordingly, for many integrands, each successive level + will double the number of accurate digits in the result (up to the + limits of floating point precision). + + The algorithm will terminate after completing level `maxlevel` or after + another termination condition is satisfied, whichever comes first. + minlevel : int, default: 2 + The level at which to begin iteration (default: 2). This does not + change the total number of function evaluations or the abscissae at + which the function is evaluated; it changes only the *number of times* + `f` is called. If ``minlevel=k``, then the integrand is evaluated at + all abscissae from levels ``0`` through ``k`` in a single call. + Note that if `minlevel` exceeds `maxlevel`, the provided `minlevel` is + ignored, and `minlevel` is set equal to `maxlevel`. 
+    atol, rtol : float, optional
+        Absolute termination tolerance (default: 0) and relative termination
+        tolerance (default: ``eps**0.75``, where ``eps`` is the precision of
+        the result dtype), respectively. Iteration will stop when
+        ``res.error < atol + rtol * abs(res.df)``. The error estimate is as
+        described in [1]_ Section 5. While not theoretically rigorous or
+        conservative, it is said to work well in practice. Must be non-negative
+        and finite if `log` is False, and must be expressed as the log of a
+        non-negative and finite number if `log` is True.
+    preserve_shape : bool, default: False
+        In the following, "arguments of `f`" refers to the array ``xi`` and
+        any arrays within ``argsi``. Let ``shape`` be the broadcasted shape
+        of `a`, `b`, and all elements of `args` (which is conceptually
+        distinct from ``xi`` and ``argsi`` passed into `f`).
+
+        - When ``preserve_shape=False`` (default), `f` must accept arguments
+          of *any* broadcastable shapes.
+
+        - When ``preserve_shape=True``, `f` must accept arguments of shape
+          ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
+          abscissae at which the function is being evaluated.
+
+        In either case, for each scalar element ``xi[j]`` within ``xi``, the array
+        returned by `f` must include the scalar ``f(xi[j])`` at the same index.
+        Consequently, the shape of the output is always the shape of the input
+        ``xi``.
+
+        See Examples.
+
+    callback : callable, optional
+        An optional user-supplied function to be called before the first
+        iteration and after each iteration.
+        Called as ``callback(res)``, where ``res`` is a ``_RichResult``
+        similar to that returned by `_differentiate` (but containing the
+        current iterate's values of all variables). If `callback` raises a
+        ``StopIteration``, the algorithm will terminate immediately and
+        `tanhsinh` will return a result object. `callback` must not mutate
+        `res` or its attributes.
+ + Returns + ------- + res : _RichResult + An object similar to an instance of `scipy.optimize.OptimizeResult` with the + following attributes. (The descriptions are written as though the values will + be scalars; however, if `f` returns an array, the outputs will be + arrays of the same shape.) + + success : bool array + ``True`` when the algorithm terminated successfully (status ``0``). + ``False`` otherwise. + status : int array + An integer representing the exit status of the algorithm. + + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : (unused) + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + + integral : float array + An estimate of the integral. + error : float array + An estimate of the error. Only available if level two or higher + has been completed; otherwise NaN. + maxlevel : int array + The maximum refinement level used. + nfev : int array + The number of points at which `f` was evaluated. + + See Also + -------- + quad + + Notes + ----- + Implements the algorithm as described in [1]_ with minor adaptations for + finite-precision arithmetic, including some described by [2]_ and [3]_. The + tanh-sinh scheme was originally introduced in [4]_. + + Due to floating-point error in the abscissae, the function may be evaluated + at the endpoints of the interval during iterations, but the values returned by + the function at the endpoints will be ignored. + + References + ---------- + .. [1] Bailey, David H., Karthik Jeyabalan, and Xiaoye S. Li. "A comparison of + three high-precision quadrature schemes." Experimental Mathematics 14.3 + (2005): 317-329. + .. [2] Vanherck, Joren, Bart Sorée, and Wim Magnus. "Tanh-sinh quadrature for + single and multiple integration using floating-point arithmetic." + arXiv preprint arXiv:2007.15057 (2020). + .. 
[3] van Engelen, Robert A. "Improving the Double Exponential Quadrature + Tanh-Sinh, Sinh-Sinh and Exp-Sinh Formulas." + https://www.genivia.com/files/qthsh.pdf + .. [4] Takahasi, Hidetosi, and Masatake Mori. "Double exponential formulas for + numerical integration." Publications of the Research Institute for + Mathematical Sciences 9.3 (1974): 721-741. + + Examples + -------- + Evaluate the Gaussian integral: + + >>> import numpy as np + >>> from scipy.integrate import tanhsinh + >>> def f(x): + ... return np.exp(-x**2) + >>> res = tanhsinh(f, -np.inf, np.inf) + >>> res.integral # true value is np.sqrt(np.pi), 1.7724538509055159 + 1.7724538509055159 + >>> res.error # actual error is 0 + 4.0007963937534104e-16 + + The value of the Gaussian function (bell curve) is nearly zero for + arguments sufficiently far from zero, so the value of the integral + over a finite interval is nearly the same. + + >>> tanhsinh(f, -20, 20).integral + 1.772453850905518 + + However, with unfavorable integration limits, the integration scheme + may not be able to find the important region. + + >>> tanhsinh(f, -np.inf, 1000).integral + 4.500490856616431 + + In such cases, or when there are singularities within the interval, + break the integral into parts with endpoints at the important points. + + >>> tanhsinh(f, -np.inf, 0).integral + tanhsinh(f, 0, 1000).integral + 1.772453850905404 + + For integration involving very large or very small magnitudes, use + log-integration. (For illustrative purposes, the following example shows a + case in which both regular and log-integration work, but for more extreme + limits of integration, log-integration would avoid the underflow + experienced when evaluating the integral normally.) + + >>> res = tanhsinh(f, 20, 30, rtol=1e-10) + >>> res.integral, res.error + (4.7819613911309014e-176, 4.670364401645202e-187) + >>> def log_f(x): + ... 
return -x**2 + >>> res = tanhsinh(log_f, 20, 30, log=True, rtol=np.log(1e-10)) + >>> np.exp(res.integral), np.exp(res.error) + (4.7819613911306924e-176, 4.670364401645093e-187) + + The limits of integration and elements of `args` may be broadcastable + arrays, and integration is performed elementwise. + + >>> from scipy import stats + >>> dist = stats.gausshyper(13.8, 3.12, 2.51, 5.18) + >>> a, b = dist.support() + >>> x = np.linspace(a, b, 100) + >>> res = tanhsinh(dist.pdf, a, x) + >>> ref = dist.cdf(x) + >>> np.allclose(res.integral, ref) + True + + By default, `preserve_shape` is False, and therefore the callable + `f` may be called with arrays of any broadcastable shapes. + For example: + + >>> shapes = [] + >>> def f(x, c): + ... shape = np.broadcast_shapes(x.shape, c.shape) + ... shapes.append(shape) + ... return np.sin(c*x) + >>> + >>> c = [1, 10, 30, 100] + >>> res = tanhsinh(f, 0, 1, args=(c,), minlevel=1) + >>> shapes + [(4,), (4, 34), (4, 32), (3, 64), (2, 128), (1, 256)] + + To understand where these shapes are coming from - and to better + understand how `tanhsinh` computes accurate results - note that + higher values of ``c`` correspond with higher frequency sinusoids. + The higher frequency sinusoids make the integrand more complicated, + so more function evaluations are required to achieve the target + accuracy: + + >>> res.nfev + array([ 67, 131, 259, 515], dtype=int32) + + The initial ``shape``, ``(4,)``, corresponds with evaluating the + integrand at a single abscissa and all four frequencies; this is used + for input validation and to determine the size and dtype of the arrays + that store results. The next shape corresponds with evaluating the + integrand at an initial grid of abscissae and all four frequencies. + Successive calls to the function double the total number of abscissae at + which the function has been evaluated. 
However, in later function + evaluations, the integrand is evaluated at fewer frequencies because + the corresponding integral has already converged to the required + tolerance. This saves function evaluations to improve performance, but + it requires the function to accept arguments of any shape. + + "Vector-valued" integrands, such as those written for use with + `scipy.integrate.quad_vec`, are unlikely to satisfy this requirement. + For example, consider + + >>> def f(x): + ... return [x, np.sin(10*x), np.cos(30*x), x*np.sin(100*x)**2] + + This integrand is not compatible with `tanhsinh` as written; for instance, + the shape of the output will not be the same as the shape of ``x``. Such a + function *could* be converted to a compatible form with the introduction of + additional parameters, but this would be inconvenient. In such cases, + a simpler solution would be to use `preserve_shape`. + + >>> shapes = [] + >>> def f(x): + ... shapes.append(x.shape) + ... x0, x1, x2, x3 = x + ... return [x0, np.sin(10*x1), np.cos(30*x2), x3*np.sin(100*x3)] + >>> + >>> a = np.zeros(4) + >>> res = tanhsinh(f, a, 1, preserve_shape=True) + >>> shapes + [(4,), (4, 66), (4, 64), (4, 128), (4, 256)] + + Here, the broadcasted shape of `a` and `b` is ``(4,)``. With + ``preserve_shape=True``, the function may be called with argument + ``x`` of shape ``(4,)`` or ``(4, n)``, and this is what we observe. + + """ + maxfun = None # unused right now + (f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback, xp) = _tanhsinh_iv( + f, a, b, log, maxfun, maxlevel, minlevel, atol, + rtol, args, preserve_shape, callback) + + # Initialization + # `eim._initialize` does several important jobs, including + # ensuring that limits, each of the `args`, and the output of `f` + # broadcast correctly and are of consistent types. To save a function + # evaluation, I pass the midpoint of the integration interval. 
This comes + # at a cost of some gymnastics to ensure that the midpoint has the right + # shape and dtype. Did you know that 0d and >0d arrays follow different + # type promotion rules? + with np.errstate(over='ignore', invalid='ignore', divide='ignore'): + c = xp.reshape((xp_ravel(a) + xp_ravel(b))/2, a.shape) + inf_a, inf_b = xp.isinf(a), xp.isinf(b) + c[inf_a] = b[inf_a] - 1. # takes care of infinite a + c[inf_b] = a[inf_b] + 1. # takes care of infinite b + c[inf_a & inf_b] = 0. # takes care of infinite a and b + temp = eim._initialize(f, (c,), args, complex_ok=True, + preserve_shape=preserve_shape, xp=xp) + f, xs, fs, args, shape, dtype, xp = temp + a = xp_ravel(xp.astype(xp.broadcast_to(a, shape), dtype)) + b = xp_ravel(xp.astype(xp.broadcast_to(b, shape), dtype)) + + # Transform improper integrals + a, b, a0, negative, abinf, ainf, binf = _transform_integrals(a, b, xp) + + # Define variables we'll need + nit, nfev = 0, 1 # one function evaluation performed above + zero = -xp.inf if log else 0 + pi = xp.asarray(xp.pi, dtype=dtype)[()] + maxiter = maxlevel - minlevel + 1 + eps = xp.finfo(dtype).eps + if rtol is None: + rtol = 0.75*math.log(eps) if log else eps**0.75 + + Sn = xp_ravel(xp.full(shape, zero, dtype=dtype)) # latest integral estimate + Sn[xp.isnan(a) | xp.isnan(b) | xp.isnan(fs[0])] = xp.nan + Sk = xp.reshape(xp.empty_like(Sn), (-1, 1))[:, 0:0] # all integral estimates + aerr = xp_ravel(xp.full(shape, xp.nan, dtype=dtype)) # absolute error + status = xp_ravel(xp.full(shape, eim._EINPROGRESS, dtype=xp.int32)) + h0 = _get_base_step(dtype, xp) + h0 = xp_real(h0) # base step + + # For term `d4` of error estimate ([1] Section 5), we need to keep the + # most extreme abscissae and corresponding `fj`s, `wj`s in Euler-Maclaurin + # sum. Here, we initialize these variables. 
+ xr0 = xp_ravel(xp.full(shape, -xp.inf, dtype=dtype)) + fr0 = xp_ravel(xp.full(shape, xp.nan, dtype=dtype)) + wr0 = xp_ravel(xp.zeros(shape, dtype=dtype)) + xl0 = xp_ravel(xp.full(shape, xp.inf, dtype=dtype)) + fl0 = xp_ravel(xp.full(shape, xp.nan, dtype=dtype)) + wl0 = xp_ravel(xp.zeros(shape, dtype=dtype)) + d4 = xp_ravel(xp.zeros(shape, dtype=dtype)) + + work = _RichResult( + Sn=Sn, Sk=Sk, aerr=aerr, h=h0, log=log, dtype=dtype, pi=pi, eps=eps, + a=xp.reshape(a, (-1, 1)), b=xp.reshape(b, (-1, 1)), # integration limits + n=minlevel, nit=nit, nfev=nfev, status=status, # iter/eval counts + xr0=xr0, fr0=fr0, wr0=wr0, xl0=xl0, fl0=fl0, wl0=wl0, d4=d4, # err est + ainf=ainf, binf=binf, abinf=abinf, a0=xp.reshape(a0, (-1, 1)), # transforms + # Store the xjc/wj pair cache in an object so they can't get compressed + # Using RichResult to allow dot notation, but a dictionary would suffice + pair_cache=_RichResult(xjc=None, wj=None, indices=[0], h0=None)) # pair cache + + # Constant scalars don't need to be put in `work` unless they need to be + # passed outside `tanhsinh`. Examples: atol, rtol, h0, minlevel. 
+ + # Correspondence between terms in the `work` object and the result + res_work_pairs = [('status', 'status'), ('integral', 'Sn'), + ('error', 'aerr'), ('nit', 'nit'), ('nfev', 'nfev')] + + def pre_func_eval(work): + # Determine abscissae at which to evaluate `f` + work.h = h0 / 2**work.n + xjc, wj = _get_pairs(work.n, h0, dtype=work.dtype, + inclusive=(work.n == minlevel), xp=xp, work=work) + work.xj, work.wj = _transform_to_limits(xjc, wj, work.a, work.b, xp) + + # Perform abscissae substitutions for infinite limits of integration + xj = xp_copy(work.xj) + # use xp_real here to avoid cupy/cupy#8434 + xj[work.abinf] = xj[work.abinf] / (1 - xp_real(xj[work.abinf])**2) + xj[work.binf] = 1/xj[work.binf] - 1 + work.a0[work.binf] + xj[work.ainf] *= -1 + return xj + + def post_func_eval(x, fj, work): + # Weight integrand as required by substitutions for infinite limits + if work.log: + fj[work.abinf] += (xp.log(1 + work.xj[work.abinf]**2) + - 2*xp.log(1 - work.xj[work.abinf]**2)) + fj[work.binf] -= 2 * xp.log(work.xj[work.binf]) + else: + fj[work.abinf] *= ((1 + work.xj[work.abinf]**2) / + (1 - work.xj[work.abinf]**2)**2) + fj[work.binf] *= work.xj[work.binf]**-2. + + # Estimate integral with Euler-Maclaurin Sum + fjwj, Sn = _euler_maclaurin_sum(fj, work, xp) + if work.Sk.shape[-1]: + Snm1 = work.Sk[:, -1] + Sn = (special.logsumexp(xp.stack([Snm1 - math.log(2), Sn]), axis=0) if log + else Snm1 / 2 + Sn) + + work.fjwj = fjwj + work.Sn = Sn + + def check_termination(work): + """Terminate due to convergence or encountering non-finite values""" + stop = xp.zeros(work.Sn.shape, dtype=bool) + + # Terminate before first iteration if integration limits are equal + if work.nit == 0: + i = xp_ravel(work.a == work.b) # ravel singleton dimension + zero = xp.asarray(-xp.inf if log else 0.) 
+ zero = xp.full(work.Sn.shape, zero, dtype=Sn.dtype) + zero[xp.isnan(Sn)] = xp.nan + work.Sn[i] = zero[i] + work.aerr[i] = zero[i] + work.status[i] = eim._ECONVERGED + stop[i] = True + else: + # Terminate if convergence criterion is met + work.rerr, work.aerr = _estimate_error(work, xp) + i = ((work.rerr < rtol) | (work.rerr + xp_real(work.Sn) < atol) if log + else (work.rerr < rtol) | (work.rerr * xp.abs(work.Sn) < atol)) + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Terminate if integral estimate becomes invalid + if log: + Sn_real = xp_real(work.Sn) + Sn_pos_inf = xp.isinf(Sn_real) & (Sn_real > 0) + i = (Sn_pos_inf | xp.isnan(work.Sn)) & ~stop + else: + i = ~xp.isfinite(work.Sn) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + work.n += 1 + work.Sk = xp.concat((work.Sk, work.Sn[:, xp.newaxis]), axis=-1) + return + + def customize_result(res, shape): + # If the integration limits were such that b < a, we reversed them + # to perform the calculation, and the final result needs to be negated. + if log and xp.any(negative): + dtype = res['integral'].dtype + pi = xp.asarray(xp.pi, dtype=dtype)[()] + j = xp.asarray(1j, dtype=xp.complex64)[()] # minimum complex type + res['integral'] = res['integral'] + negative*pi*j + else: + res['integral'][negative] *= -1 + + # For this algorithm, it seems more appropriate to report the maximum + # level rather than the number of iterations in which it was performed. + res['maxlevel'] = minlevel + res['nit'] - 1 + res['maxlevel'][res['nit'] == 0] = -1 + del res['nit'] + return shape + + # Suppress all warnings initially, since there are many places in the code + # for which this is expected behavior. 
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'): + res = eim._loop(work, callback, shape, maxiter, f, args, dtype, pre_func_eval, + post_func_eval, check_termination, post_termination_check, + customize_result, res_work_pairs, xp, preserve_shape) + return res + + +def _get_base_step(dtype, xp): + # Compute the base step length for the provided dtype. Theoretically, the + # Euler-Maclaurin sum is infinite, but it gets cut off when either the + # weights underflow or the abscissae cannot be distinguished from the + # limits of integration. The latter happens to occur first for float32 and + # float64, and it occurs when `xjc` (the abscissa complement) + # in `_compute_pair` underflows. We can solve for the argument `tmax` at + # which it will underflow using [2] Eq. 13. + fmin = 4*xp.finfo(dtype).smallest_normal # stay a little away from the limit + tmax = math.asinh(math.log(2/fmin - 1) / xp.pi) + + # Based on this, we can choose a base step size `h` for level 0. + # The number of function evaluations will be `2 + m*2^(k+1)`, where `k` is + # the level and `m` is an integer we get to choose. I choose + # m = _N_BASE_STEPS = `8` somewhat arbitrarily, but a rationale is that a + # power of 2 makes floating point arithmetic more predictable. It also + # results in a base step size close to `1`, which is what [1] uses (and I + # used here until I found [2] and these ideas settled). + h0 = tmax / _N_BASE_STEPS + return xp.asarray(h0, dtype=dtype)[()] + + +_N_BASE_STEPS = 8 + + +def _compute_pair(k, h0, xp): + # Compute the abscissa-weight pairs for each level k. See [1] page 9. + + # For now, we compute and store in 64-bit precision. If higher-precision + # data types become better supported, it would be good to compute these + # using the highest precision available. Or, once there is an Array API- + # compatible arbitrary precision array, we can compute at the required + # precision. 
+ + # "....each level k of abscissa-weight pairs uses h = 2 **-k" + # We adapt to floating point arithmetic using ideas of [2]. + h = h0 / 2**k + max = _N_BASE_STEPS * 2**k + + # For iterations after the first, "....the integrand function needs to be + # evaluated only at the odd-indexed abscissas at each level." + j = xp.arange(max+1) if k == 0 else xp.arange(1, max+1, 2) + jh = j * h + + # "In this case... the weights wj = u1/cosh(u2)^2, where..." + pi_2 = xp.pi / 2 + u1 = pi_2*xp.cosh(jh) + u2 = pi_2*xp.sinh(jh) + # Denominators get big here. Overflow then underflow doesn't need warning. + # with np.errstate(under='ignore', over='ignore'): + wj = u1 / xp.cosh(u2)**2 + # "We actually store 1-xj = 1/(...)." + xjc = 1 / (xp.exp(u2) * xp.cosh(u2)) # complement of xj = xp.tanh(u2) + + # When level k == 0, the zeroth xj corresponds with xj = 0. To simplify + # code, the function will be evaluated there twice; each gets half weight. + wj[0] = wj[0] / 2 if k == 0 else wj[0] + + return xjc, wj # store at full precision + + +def _pair_cache(k, h0, xp, work): + # Cache the abscissa-weight pairs up to a specified level. + # Abscissae and weights of consecutive levels are concatenated. + # `index` records the indices that correspond with each level: + # `xjc[index[k]:index[k+1]` extracts the level `k` abscissae. 
+ if not isinstance(h0, type(work.pair_cache.h0)) or h0 != work.pair_cache.h0: + work.pair_cache.xjc = xp.empty(0) + work.pair_cache.wj = xp.empty(0) + work.pair_cache.indices = [0] + + xjcs = [work.pair_cache.xjc] + wjs = [work.pair_cache.wj] + + for i in range(len(work.pair_cache.indices)-1, k + 1): + xjc, wj = _compute_pair(i, h0, xp) + xjcs.append(xjc) + wjs.append(wj) + work.pair_cache.indices.append(work.pair_cache.indices[-1] + xjc.shape[0]) + + work.pair_cache.xjc = xp.concat(xjcs) + work.pair_cache.wj = xp.concat(wjs) + work.pair_cache.h0 = h0 + + +def _get_pairs(k, h0, inclusive, dtype, xp, work): + # Retrieve the specified abscissa-weight pairs from the cache + # If `inclusive`, return all up to and including the specified level + if (len(work.pair_cache.indices) <= k+2 + or not isinstance (h0, type(work.pair_cache.h0)) + or h0 != work.pair_cache.h0): + _pair_cache(k, h0, xp, work) + + xjc = work.pair_cache.xjc + wj = work.pair_cache.wj + indices = work.pair_cache.indices + + start = 0 if inclusive else indices[k] + end = indices[k+1] + + return xp.astype(xjc[start:end], dtype), xp.astype(wj[start:end], dtype) + + +def _transform_to_limits(xjc, wj, a, b, xp): + # Transform integral according to user-specified limits. This is just + # math that follows from the fact that the standard limits are (-1, 1). + # Note: If we had stored xj instead of xjc, we would have + # xj = alpha * xj + beta, where beta = (a + b)/2 + alpha = (b - a) / 2 + xj = xp.concat((-alpha * xjc + b, alpha * xjc + a), axis=-1) + wj = wj*alpha # arguments get broadcasted, so we can't use *= + wj = xp.concat((wj, wj), axis=-1) + + # Points at the boundaries can be generated due to finite precision + # arithmetic, but these function values aren't supposed to be included in + # the Euler-Maclaurin sum. Ideally we wouldn't evaluate the function at + # these points; however, we can't easily filter out points since this + # function is vectorized. Instead, zero the weights. 
+ # Note: values may have complex dtype, but have zero imaginary part + xj_real, a_real, b_real = xp_real(xj), xp_real(a), xp_real(b) + invalid = (xj_real <= a_real) | (xj_real >= b_real) + wj[invalid] = 0 + return xj, wj + + +def _euler_maclaurin_sum(fj, work, xp): + # Perform the Euler-Maclaurin Sum, [1] Section 4 + + # The error estimate needs to know the magnitude of the last term + # omitted from the Euler-Maclaurin sum. This is a bit involved because + # it may have been computed at a previous level. I sure hope it's worth + # all the trouble. + xr0, fr0, wr0 = work.xr0, work.fr0, work.wr0 + xl0, fl0, wl0 = work.xl0, work.fl0, work.wl0 + + # It is much more convenient to work with the transposes of our work + # variables here. + xj, fj, wj = work.xj.T, fj.T, work.wj.T + n_x, n_active = xj.shape # number of abscissae, number of active elements + + # We'll work with the left and right sides separately + xr, xl = xp_copy(xp.reshape(xj, (2, n_x // 2, n_active))) # this gets modified + fr, fl = xp.reshape(fj, (2, n_x // 2, n_active)) + wr, wl = xp.reshape(wj, (2, n_x // 2, n_active)) + + invalid_r = ~xp.isfinite(fr) | (wr == 0) + invalid_l = ~xp.isfinite(fl) | (wl == 0) + + # integer index of the maximum abscissa at this level + xr[invalid_r] = -xp.inf + ir = xp.argmax(xp_real(xr), axis=0, keepdims=True) + # abscissa, function value, and weight at this index + ### Not Array API Compatible... 
yet ### + xr_max = xp_take_along_axis(xr, ir, axis=0)[0] + fr_max = xp_take_along_axis(fr, ir, axis=0)[0] + wr_max = xp_take_along_axis(wr, ir, axis=0)[0] + # boolean indices at which maximum abscissa at this level exceeds + # the incumbent maximum abscissa (from all previous levels) + # note: abscissa may have complex dtype, but will have zero imaginary part + j = xp_real(xr_max) > xp_real(xr0) + # Update record of the incumbent abscissa, function value, and weight + xr0[j] = xr_max[j] + fr0[j] = fr_max[j] + wr0[j] = wr_max[j] + + # integer index of the minimum abscissa at this level + xl[invalid_l] = xp.inf + il = xp.argmin(xp_real(xl), axis=0, keepdims=True) + # abscissa, function value, and weight at this index + xl_min = xp_take_along_axis(xl, il, axis=0)[0] + fl_min = xp_take_along_axis(fl, il, axis=0)[0] + wl_min = xp_take_along_axis(wl, il, axis=0)[0] + # boolean indices at which minimum abscissa at this level is less than + # the incumbent minimum abscissa (from all previous levels) + # note: abscissa may have complex dtype, but will have zero imaginary part + j = xp_real(xl_min) < xp_real(xl0) + # Update record of the incumbent abscissa, function value, and weight + xl0[j] = xl_min[j] + fl0[j] = fl_min[j] + wl0[j] = wl_min[j] + fj = fj.T + + # Compute the error estimate `d4` - the magnitude of the leftmost or + # rightmost term, whichever is greater. + flwl0 = fl0 + xp.log(wl0) if work.log else fl0 * wl0 # leftmost term + frwr0 = fr0 + xp.log(wr0) if work.log else fr0 * wr0 # rightmost term + magnitude = xp_real if work.log else xp.abs + work.d4 = xp.maximum(magnitude(flwl0), magnitude(frwr0)) + + # There are two approaches to dealing with function values that are + # numerically infinite due to approaching a singularity - zero them, or + # replace them with the function value at the nearest non-infinite point. + # [3] pg. 22 suggests the latter, so let's do that given that we have the + # information. 
+ fr0b = xp.broadcast_to(fr0[xp.newaxis, :], fr.shape) + fl0b = xp.broadcast_to(fl0[xp.newaxis, :], fl.shape) + fr[invalid_r] = fr0b[invalid_r] + fl[invalid_l] = fl0b[invalid_l] + + # When wj is zero, log emits a warning + # with np.errstate(divide='ignore'): + fjwj = fj + xp.log(work.wj) if work.log else fj * work.wj + + # update integral estimate + Sn = (special.logsumexp(fjwj + xp.log(work.h), axis=-1) if work.log + else xp.sum(fjwj, axis=-1) * work.h) + + work.xr0, work.fr0, work.wr0 = xr0, fr0, wr0 + work.xl0, work.fl0, work.wl0 = xl0, fl0, wl0 + + return fjwj, Sn + + +def _estimate_error(work, xp): + # Estimate the error according to [1] Section 5 + + if work.n == 0 or work.nit == 0: + # The paper says to use "one" as the error before it can be calculated. + # NaN seems to be more appropriate. + nan = xp.full_like(work.Sn, xp.nan) + return nan, nan + + indices = work.pair_cache.indices + + n_active = work.Sn.shape[0] # number of active elements + axis_kwargs = dict(axis=-1, keepdims=True) + + # With a jump start (starting at level higher than 0), we haven't + # explicitly calculated the integral estimate at lower levels. But we have + # all the function value-weight products, so we can compute the + # lower-level estimates. + if work.Sk.shape[-1] == 0: + h = 2 * work.h # step size at this level + n_x = indices[work.n] # number of abscissa up to this level + # The right and left fjwj terms from all levels are concatenated along + # the last axis. Get out only the terms up to this level. 
+ fjwj_rl = xp.reshape(work.fjwj, (n_active, 2, -1)) + fjwj = xp.reshape(fjwj_rl[:, :, :n_x], (n_active, 2*n_x)) + # Compute the Euler-Maclaurin sum at this level + Snm1 = (special.logsumexp(fjwj, **axis_kwargs) + xp.log(h) if work.log + else xp.sum(fjwj, **axis_kwargs) * h) + work.Sk = xp.concat((Snm1, work.Sk), axis=-1) + + if work.n == 1: + nan = xp.full_like(work.Sn, xp.nan) + return nan, nan + + # The paper says not to calculate the error for n<=2, but it's not clear + # about whether it starts at level 0 or level 1. We start at level 0, so + # why not compute the error beginning in level 2? + if work.Sk.shape[-1] < 2: + h = 4 * work.h # step size at this level + n_x = indices[work.n-1] # number of abscissa up to this level + # The right and left fjwj terms from all levels are concatenated along + # the last axis. Get out only the terms up to this level. + fjwj_rl = xp.reshape(work.fjwj, (work.Sn.shape[0], 2, -1)) + fjwj = xp.reshape(fjwj_rl[..., :n_x], (n_active, 2*n_x)) + # Compute the Euler-Maclaurin sum at this level + Snm2 = (special.logsumexp(fjwj, **axis_kwargs) + xp.log(h) if work.log + else xp.sum(fjwj, **axis_kwargs) * h) + work.Sk = xp.concat((Snm2, work.Sk), axis=-1) + + Snm2 = work.Sk[..., -2] + Snm1 = work.Sk[..., -1] + + e1 = xp.asarray(work.eps)[()] + + if work.log: + log_e1 = xp.log(e1) + # Currently, only real integrals are supported in log-scale. All + # complex values have imaginary part in increments of pi*j, which just + # carries sign information of the original integral, so use of + # `xp.real` here is equivalent to absolute value in real scale. 
+ d1 = xp_real(special.logsumexp(xp.stack([work.Sn, Snm1 + work.pi*1j]), axis=0)) + d2 = xp_real(special.logsumexp(xp.stack([work.Sn, Snm2 + work.pi*1j]), axis=0)) + d3 = log_e1 + xp.max(xp_real(work.fjwj), axis=-1) + d4 = work.d4 + ds = xp.stack([d1 ** 2 / d2, 2 * d1, d3, d4]) + aerr = xp.max(ds, axis=0) + rerr = xp.maximum(log_e1, aerr - xp_real(work.Sn)) + else: + # Note: explicit computation of log10 of each of these is unnecessary. + d1 = xp.abs(work.Sn - Snm1) + d2 = xp.abs(work.Sn - Snm2) + d3 = e1 * xp.max(xp.abs(work.fjwj), axis=-1) + d4 = work.d4 + # If `d1` is 0, no need to warn. This does the right thing. + # with np.errstate(divide='ignore'): + ds = xp.stack([d1**(xp.log(d1)/xp.log(d2)), d1**2, d3, d4]) + aerr = xp.max(ds, axis=0) + rerr = xp.maximum(e1, aerr/xp.abs(work.Sn)) + + aerr = xp.reshape(xp.astype(aerr, work.dtype), work.Sn.shape) + return rerr, aerr + + +def _transform_integrals(a, b, xp): + # Transform integrals to a form with finite a <= b + # For b == a (even infinite), we ensure that the limits remain equal + # For b < a, we reverse the limits and will multiply the final result by -1 + # For infinite limit on the right, we use the substitution x = 1/t - 1 + a + # For infinite limit on the left, we substitute x = -x and treat as above + # For infinite limits, we substitute x = t / (1-t**2) + ab_same = (a == b) + a[ab_same], b[ab_same] = 1, 1 + + # `a, b` may have complex dtype but have zero imaginary part + negative = xp_real(b) < xp_real(a) + a[negative], b[negative] = b[negative], a[negative] + + abinf = xp.isinf(a) & xp.isinf(b) + a[abinf], b[abinf] = -1, 1 + + ainf = xp.isinf(a) + a[ainf], b[ainf] = -b[ainf], -a[ainf] + + binf = xp.isinf(b) + a0 = xp_copy(a) + a[binf], b[binf] = 0, 1 + + return a, b, a0, negative, abinf, ainf, binf + + +def _tanhsinh_iv(f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback): + # Input validation and standardization + + xp = array_namespace(a, b) + + message = '`f` must 
be callable.' + if not callable(f): + raise ValueError(message) + + message = 'All elements of `a` and `b` must be real numbers.' + a, b = xp.asarray(a), xp.asarray(b) + a, b = xp.broadcast_arrays(a, b) + if (xp.isdtype(a.dtype, 'complex floating') + or xp.isdtype(b.dtype, 'complex floating')): + raise ValueError(message) + + message = '`log` must be True or False.' + if log not in {True, False}: + raise ValueError(message) + log = bool(log) + + if atol is None: + atol = -xp.inf if log else 0 + + rtol_temp = rtol if rtol is not None else 0. + + # using NumPy for convenience here; these are just floats, not arrays + params = np.asarray([atol, rtol_temp, 0.]) + message = "`atol` and `rtol` must be real numbers." + if not np.issubdtype(params.dtype, np.floating): + raise ValueError(message) + + if log: + message = '`atol` and `rtol` may not be positive infinity.' + if np.any(np.isposinf(params)): + raise ValueError(message) + else: + message = '`atol` and `rtol` must be non-negative and finite.' + if np.any(params < 0) or np.any(np.isinf(params)): + raise ValueError(message) + atol = params[0] + rtol = rtol if rtol is None else params[1] + + BIGINT = float(2**62) + if maxfun is None and maxlevel is None: + maxlevel = 10 + + maxfun = BIGINT if maxfun is None else maxfun + maxlevel = BIGINT if maxlevel is None else maxlevel + + message = '`maxfun`, `maxlevel`, and `minlevel` must be integers.' + params = np.asarray([maxfun, maxlevel, minlevel]) + if not (np.issubdtype(params.dtype, np.number) + and np.all(np.isreal(params)) + and np.all(params.astype(np.int64) == params)): + raise ValueError(message) + message = '`maxfun`, `maxlevel`, and `minlevel` must be non-negative.' + if np.any(params < 0): + raise ValueError(message) + maxfun, maxlevel, minlevel = params.astype(np.int64) + minlevel = min(minlevel, maxlevel) + + if not np.iterable(args): + args = (args,) + args = (xp.asarray(arg) for arg in args) + + message = '`preserve_shape` must be True or False.' 
+ if preserve_shape not in {True, False}: + raise ValueError(message) + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return (f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback, xp) + + +def _nsum_iv(f, a, b, step, args, log, maxterms, tolerances): + # Input validation and standardization + + xp = array_namespace(a, b) + + message = '`f` must be callable.' + if not callable(f): + raise ValueError(message) + + message = 'All elements of `a`, `b`, and `step` must be real numbers.' + a, b, step = xp.broadcast_arrays(xp.asarray(a), xp.asarray(b), xp.asarray(step)) + dtype = xp.result_type(a.dtype, b.dtype, step.dtype) + if not xp.isdtype(dtype, 'numeric') or xp.isdtype(dtype, 'complex floating'): + raise ValueError(message) + + valid_b = b >= a # NaNs will be False + valid_step = xp.isfinite(step) & (step > 0) + valid_abstep = valid_b & valid_step + + message = '`log` must be True or False.' + if log not in {True, False}: + raise ValueError(message) + + tolerances = {} if tolerances is None else tolerances + + atol = tolerances.get('atol', None) + if atol is None: + atol = -xp.inf if log else 0 + + rtol = tolerances.get('rtol', None) + rtol_temp = rtol if rtol is not None else 0. + + # using NumPy for convenience here; these are just floats, not arrays + params = np.asarray([atol, rtol_temp, 0.]) + message = "`atol` and `rtol` must be real numbers." + if not np.issubdtype(params.dtype, np.floating): + raise ValueError(message) + + if log: + message = '`atol`, `rtol` may not be positive infinity or NaN.' + if np.any(np.isposinf(params) | np.isnan(params)): + raise ValueError(message) + else: + message = '`atol`, and `rtol` must be non-negative and finite.' 
+ if np.any((params < 0) | (~np.isfinite(params))): + raise ValueError(message) + atol = params[0] + rtol = rtol if rtol is None else params[1] + + maxterms_int = int(maxterms) + if maxterms_int != maxterms or maxterms < 0: + message = "`maxterms` must be a non-negative integer." + raise ValueError(message) + + if not np.iterable(args): + args = (args,) + + return f, a, b, step, valid_abstep, args, log, maxterms_int, atol, rtol, xp + + +def nsum(f, a, b, *, step=1, args=(), log=False, maxterms=int(2**20), tolerances=None): + r"""Evaluate a convergent finite or infinite series. + + For finite `a` and `b`, this evaluates:: + + f(a + np.arange(n)*step).sum() + + where ``n = int((b - a) / step) + 1``, where `f` is smooth, positive, and + unimodal. The number of terms in the sum may be very large or infinite, + in which case a partial sum is evaluated directly and the remainder is + approximated using integration. + + Parameters + ---------- + f : callable + The function that evaluates terms to be summed. The signature must be:: + + f(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with ``x``. + + `f` must be an elementwise function: each element ``f(x)[i]`` + must equal ``f(x[i])`` for all indices ``i``. It must not mutate the + array ``x`` or the arrays in ``args``, and it must return NaN where + the argument is NaN. + + `f` must represent a smooth, positive, unimodal function of `x` defined at + *all reals* between `a` and `b`. + a, b : float array_like + Real lower and upper limits of summed terms. Must be broadcastable. + Each element of `a` must be less than the corresponding element in `b`. + step : float array_like + Finite, positive, real step between summed terms. Must be broadcastable + with `a` and `b`. 
Note that the number of terms included in the sum will
+ be ``floor((b - a) / step)`` + 1; adjust `b` accordingly to ensure
+ that ``f(b)`` is included if intended.
+ args : tuple of array_like, optional
+ Additional positional arguments to be passed to `f`. Must be arrays
+ broadcastable with `a`, `b`, and `step`. If the callable to be summed
+ requires arguments that are not broadcastable with `a`, `b`, and `step`,
+ wrap that callable with `f` such that `f` accepts only `x` and
+ broadcastable ``*args``. See Examples.
+ log : bool, default: False
+ Setting to True indicates that `f` returns the log of the terms
+ and that `atol` and `rtol` are expressed as the logs of the absolute
+ and relative errors. In this case, the result object will contain the
+ log of the sum and error. This is useful for summands for which
+ numerical underflow or overflow would lead to inaccuracies.
+ maxterms : int, default: 2**20
+ The maximum number of terms to evaluate for direct summation.
+ Additional function evaluations may be performed for input
+ validation and integral evaluation.
+ tolerances : dictionary of tolerances, optional
+ Dictionary including optional keys:
+ atol, rtol : float, optional
+ Absolute termination tolerance (default: 0) and relative termination
+ tolerance (default: ``eps**0.5``, where ``eps`` is the precision of
+ the result dtype), respectively. Must be non-negative
+ and finite if `log` is False, and must be expressed as the log of a
+ non-negative and finite number if `log` is True.
+
+ Returns
+ -------
+ res : _RichResult
+ An object similar to an instance of `scipy.optimize.OptimizeResult` with the
+ following attributes. (The descriptions are written as though the values will
+ be scalars; however, if `f` returns an array, the outputs will be
+ arrays of the same shape.)
+
+ success : bool
+ ``True`` when the algorithm terminated successfully (status ``0``);
+ ``False`` otherwise.
+ status : int array
+ An integer representing the exit status of the algorithm.
+
+ - ``0`` : The algorithm converged to the specified tolerances. 
+ - ``-1`` : Element(s) of `a`, `b`, or `step` are invalid + - ``-2`` : Numerical integration reached its iteration limit; + the sum may be divergent. + - ``-3`` : A non-finite value was encountered. + - ``-4`` : The magnitude of the last term of the partial sum exceeds + the tolerances, so the error estimate exceeds the tolerances. + Consider increasing `maxterms` or loosening `tolerances`. + Alternatively, the callable may not be unimodal, or the limits of + summation may be too far from the function maximum. Consider + increasing `maxterms` or breaking the sum into pieces. + + sum : float array + An estimate of the sum. + error : float array + An estimate of the absolute error, assuming all terms are non-negative, + the function is computed exactly, and direct summation is accurate to + the precision of the result dtype. + nfev : int array + The number of points at which `f` was evaluated. + + See Also + -------- + mpmath.nsum + + Notes + ----- + The method implemented for infinite summation is related to the integral + test for convergence of an infinite series: assuming `step` size 1 for + simplicity of exposition, the sum of a monotone decreasing function is bounded by + + .. math:: + + \int_u^\infty f(x) dx \leq \sum_{k=u}^\infty f(k) \leq \int_u^\infty f(x) dx + f(u) + + Let :math:`a` represent `a`, :math:`n` represent `maxterms`, :math:`\epsilon_a` + represent `atol`, and :math:`\epsilon_r` represent `rtol`. + The implementation first evaluates the integral :math:`S_l=\int_a^\infty f(x) dx` + as a lower bound of the infinite sum. Then, it seeks a value :math:`c > a` such + that :math:`f(c) < \epsilon_a + S_l \epsilon_r`, if it exists; otherwise, + let :math:`c = a + n`. Then the infinite sum is approximated as + + .. math:: + + \sum_{k=a}^{c-1} f(k) + \int_c^\infty f(x) dx + f(c)/2 + + and the reported error is :math:`f(c)/2` plus the error estimate of + numerical integration. 
Note that the integral approximations may require
+ evaluation of the function at points besides those that appear in the sum,
+ so `f` must be a continuous and monotonically decreasing function defined
+ for all reals within the integration interval. However, due to the nature
+ of the integral approximation, the shape of the function between points
+ that appear in the sum has little effect. If there is not a natural
+ extension of the function to all reals, consider using linear interpolation,
+ which is easy to evaluate and preserves monotonicity.
+
+ The approach described above is generalized for non-unit
+ `step` and finite `b` that is too large for direct evaluation of the sum,
+ i.e. ``b - a + 1 > maxterms``. It is further generalized to unimodal
+ functions by directly summing terms surrounding the maximum.
+ This strategy may fail:
+
+ - If the left limit is finite and the maximum is far from it.
+ - If the right limit is finite and the maximum is far from it.
+ - If both limits are finite and the maximum is far from the origin.
+
+ In these cases, accuracy may be poor, and `nsum` may return status code ``-4``.
+
+ Although the callable `f` must be non-negative and unimodal,
+ `nsum` can be used to evaluate more general forms of series. For instance, to
+ evaluate an alternating series, pass a callable that returns the difference
+ between pairs of adjacent terms, and adjust `step` accordingly. See Examples.
+
+ References
+ ----------
+ .. [1] Wikipedia. "Integral test for convergence."
+ https://en.wikipedia.org/wiki/Integral_test_for_convergence
+
+ Examples
+ --------
+ Compute the infinite sum of the reciprocals of squared integers. 
+ + >>> import numpy as np + >>> from scipy.integrate import nsum + >>> res = nsum(lambda k: 1/k**2, 1, np.inf) + >>> ref = np.pi**2/6 # true value + >>> res.error # estimated error + np.float64(7.448762306416137e-09) + >>> (res.sum - ref)/ref # true error + np.float64(-1.839871898894426e-13) + >>> res.nfev # number of points at which callable was evaluated + np.int32(8561) + + Compute the infinite sums of the reciprocals of integers raised to powers ``p``, + where ``p`` is an array. + + >>> from scipy import special + >>> p = np.arange(3, 10) + >>> res = nsum(lambda k, p: 1/k**p, 1, np.inf, maxterms=1e3, args=(p,)) + >>> ref = special.zeta(p, 1) + >>> np.allclose(res.sum, ref) + True + + Evaluate the alternating harmonic series. + + >>> res = nsum(lambda x: 1/x - 1/(x+1), 1, np.inf, step=2) + >>> res.sum, res.sum - np.log(2) # result, difference vs analytical sum + (np.float64(0.6931471805598691), np.float64(-7.616129948928574e-14)) + + """ # noqa: E501 + # Potential future work: + # - improve error estimate of `_direct` sum + # - add other methods for convergence acceleration (Richardson, epsilon) + # - support negative monotone increasing functions? + # - b < a / negative step? + # - complex-valued function? + # - check for violations of monotonicity? 
+ + # Function-specific input validation / standardization + tmp = _nsum_iv(f, a, b, step, args, log, maxterms, tolerances) + f, a, b, step, valid_abstep, args, log, maxterms, atol, rtol, xp = tmp + + # Additional elementwise algorithm input validation / standardization + tmp = eim._initialize(f, (a,), args, complex_ok=False, xp=xp) + f, xs, fs, args, shape, dtype, xp = tmp + + # Finish preparing `a`, `b`, and `step` arrays + a = xs[0] + b = xp.astype(xp_ravel(xp.broadcast_to(b, shape)), dtype) + step = xp.astype(xp_ravel(xp.broadcast_to(step, shape)), dtype) + valid_abstep = xp_ravel(xp.broadcast_to(valid_abstep, shape)) + nterms = xp.floor((b - a) / step) + finite_terms = xp.isfinite(nterms) + b[finite_terms] = a[finite_terms] + nterms[finite_terms]*step[finite_terms] + + # Define constants + eps = xp.finfo(dtype).eps + zero = xp.asarray(-xp.inf if log else 0, dtype=dtype)[()] + if rtol is None: + rtol = 0.5*math.log(eps) if log else eps**0.5 + constants = (dtype, log, eps, zero, rtol, atol, maxterms) + + # Prepare result arrays + S = xp.empty_like(a) + E = xp.empty_like(a) + status = xp.zeros(len(a), dtype=xp.int32) + nfev = xp.ones(len(a), dtype=xp.int32) # one function evaluation above + + # Branch for direct sum evaluation / integral approximation / invalid input + i0 = ~valid_abstep # invalid + i1 = (nterms + 1 <= maxterms) & ~i0 # direct sum evaluation + i2 = xp.isfinite(a) & ~i1 & ~i0 # infinite sum to the right + i3 = xp.isfinite(b) & ~i2 & ~i1 & ~i0 # infinite sum to the left + i4 = ~i3 & ~i2 & ~i1 & ~i0 # infinite sum on both sides + + if xp.any(i0): + S[i0], E[i0] = xp.nan, xp.nan + status[i0] = -1 + + if xp.any(i1): + args_direct = [arg[i1] for arg in args] + tmp = _direct(f, a[i1], b[i1], step[i1], args_direct, constants, xp) + S[i1], E[i1] = tmp[:-1] + nfev[i1] += tmp[-1] + status[i1] = -3 * xp.asarray(~xp.isfinite(S[i1]), dtype=xp.int32) + + if xp.any(i2): + args_indirect = [arg[i2] for arg in args] + tmp = _integral_bound(f, a[i2], b[i2], 
step[i2], + args_indirect, constants, xp) + S[i2], E[i2], status[i2] = tmp[:-1] + nfev[i2] += tmp[-1] + + if xp.any(i3): + args_indirect = [arg[i3] for arg in args] + def _f(x, *args): return f(-x, *args) + tmp = _integral_bound(_f, -b[i3], -a[i3], step[i3], + args_indirect, constants, xp) + S[i3], E[i3], status[i3] = tmp[:-1] + nfev[i3] += tmp[-1] + + if xp.any(i4): + args_indirect = [arg[i4] for arg in args] + + # There are two obvious high-level strategies: + # - Do two separate half-infinite sums (e.g. from -inf to 0 and 1 to inf) + # - Make a callable that returns f(x) + f(-x) and do a single half-infinite sum + # I thought the latter would have about half the overhead, so I went that way. + # Then there are two ways of ensuring that f(0) doesn't get counted twice. + # - Evaluate the sum from 1 to inf and add f(0) + # - Evaluate the sum from 0 to inf and subtract f(0) + # - Evaluate the sum from 0 to inf, but apply a weight of 0.5 when `x = 0` + # The last option has more overhead, but is simpler to implement correctly + # (especially getting the status message right) + if log: + def _f(x, *args): + log_factor = xp.where(x==0, math.log(0.5), 0) + out = xp.stack([f(x, *args), f(-x, *args)], axis=0) + return special.logsumexp(out, axis=0) + log_factor + + else: + def _f(x, *args): + factor = xp.where(x==0, 0.5, 1) + return (f(x, *args) + f(-x, *args)) * factor + + zero = xp.zeros_like(a[i4]) + tmp = _integral_bound(_f, zero, b[i4], step[i4], args_indirect, constants, xp) + S[i4], E[i4], status[i4] = tmp[:-1] + nfev[i4] += 2*tmp[-1] + + # Return results + S, E = S.reshape(shape)[()], E.reshape(shape)[()] + status, nfev = status.reshape(shape)[()], nfev.reshape(shape)[()] + return _RichResult(sum=S, error=E, status=status, success=status == 0, + nfev=nfev) + + +def _direct(f, a, b, step, args, constants, xp, inclusive=True): + # Directly evaluate the sum. + + # When used in the context of distributions, `args` would contain the + # distribution parameters. 
We have broadcasted for simplicity, but we could + # reduce function evaluations when distribution parameters are the same but + # sum limits differ. Roughly: + # - compute the function at all points between min(a) and max(b), + # - compute the cumulative sum, + # - take the difference between elements of the cumulative sum + # corresponding with b and a. + # This is left to future enhancement + + dtype, log, eps, zero, _, _, _ = constants + + # To allow computation in a single vectorized call, find the maximum number + # of points (over all slices) at which the function needs to be evaluated. + # Note: if `inclusive` is `True`, then we want `1` more term in the sum. + # I didn't think it was great style to use `True` as `1` in Python, so I + # explicitly converted it to an `int` before using it. + inclusive_adjustment = int(inclusive) + steps = xp.round((b - a) / step) + inclusive_adjustment + # Equivalently, steps = xp.round((b - a) / step) + inclusive + max_steps = int(xp.max(steps)) + + # In each slice, the function will be evaluated at the same number of points, + # but excessive points (those beyond the right sum limit `b`) are replaced + # with NaN to (potentially) reduce the time of these unnecessary calculations. + # Use a new last axis for these calculations for consistency with other + # elementwise algorithms. + a2, b2, step2 = a[:, xp.newaxis], b[:, xp.newaxis], step[:, xp.newaxis] + args2 = [arg[:, xp.newaxis] for arg in args] + ks = a2 + xp.arange(max_steps, dtype=dtype) * step2 + i_nan = ks >= (b2 + inclusive_adjustment*step2/2) + ks[i_nan] = xp.nan + fs = f(ks, *args2) + + # The function evaluated at NaN is NaN, and NaNs are zeroed in the sum. + # In some cases it may be faster to loop over slices than to vectorize + # like this. This is an optimization that can be added later. + fs[i_nan] = zero + nfev = max_steps - i_nan.sum(axis=-1) + S = special.logsumexp(fs, axis=-1) if log else xp.sum(fs, axis=-1) + # Rough, non-conservative error estimate. 
See gh-19667 for improvement ideas. + E = xp_real(S) + math.log(eps) if log else eps * abs(S) + return S, E, nfev + + +def _integral_bound(f, a, b, step, args, constants, xp): + # Estimate the sum with integral approximation + dtype, log, _, _, rtol, atol, maxterms = constants + log2 = xp.asarray(math.log(2), dtype=dtype) + + # Get a lower bound on the sum and compute effective absolute tolerance + lb = tanhsinh(f, a, b, args=args, atol=atol, rtol=rtol, log=log) + tol = xp.broadcast_to(xp.asarray(atol), lb.integral.shape) + if log: + tol = special.logsumexp(xp.stack((tol, rtol + lb.integral)), axis=0) + else: + tol = tol + rtol*lb.integral + i_skip = lb.status < 0 # avoid unnecessary f_evals if integral is divergent + tol[i_skip] = xp.nan + status = lb.status + + # As in `_direct`, we'll need a temporary new axis for points + # at which to evaluate the function. Append axis at the end for + # consistency with other elementwise algorithms. + a2 = a[..., xp.newaxis] + step2 = step[..., xp.newaxis] + args2 = [arg[..., xp.newaxis] for arg in args] + + # Find the location of a term that is less than the tolerance (if possible) + log2maxterms = math.floor(math.log2(maxterms)) if maxterms else 0 + n_steps = xp.concat((2**xp.arange(0, log2maxterms), xp.asarray([maxterms]))) + n_steps = xp.astype(n_steps, dtype) + nfev = len(n_steps) * 2 + ks = a2 + n_steps * step2 + fks = f(ks, *args2) + fksp1 = f(ks + step2, *args2) # check that the function is decreasing + fk_insufficient = (fks > tol[:, xp.newaxis]) | (fksp1 > fks) + n_fk_insufficient = xp.sum(fk_insufficient, axis=-1) + nt = xp.minimum(n_fk_insufficient, xp.asarray(n_steps.shape[-1]-1)) + n_steps = n_steps[nt] + + # If `maxterms` is insufficient (i.e. either the magnitude of the last term of the + # partial sum exceeds the tolerance or the function is not decreasing), finish the + # calculation, but report nonzero status. (Improvement: separate the status codes + # for these two cases.) 
+ i_fk_insufficient = (n_fk_insufficient == nfev//2) + + # Directly evaluate the sum up to this term + k = a + n_steps * step + left, left_error, left_nfev = _direct(f, a, k, step, args, + constants, xp, inclusive=False) + left_is_pos_inf = xp.isinf(left) & (left > 0) + i_skip |= left_is_pos_inf # if sum is infinite, no sense in continuing + status[left_is_pos_inf] = -3 + k[i_skip] = xp.nan + + # Use integration to estimate the remaining sum + # Possible optimization for future work: if there were no terms less than + # the tolerance, there is no need to compute the integral to better accuracy. + # Something like: + # atol = xp.maximum(atol, xp.minimum(fk/2 - fb/2)) + # rtol = xp.maximum(rtol, xp.minimum((fk/2 - fb/2)/left)) + # where `fk`/`fb` are currently calculated below. + right = tanhsinh(f, k, b, args=args, atol=atol, rtol=rtol, log=log) + + # Calculate the full estimate and error from the pieces + fk = fks[xp.arange(len(fks)), nt] + + # fb = f(b, *args), but some functions return NaN at infinity. + # instead of 0 like they must (for the sum to be convergent). 
+ fb = xp.full_like(fk, -xp.inf) if log else xp.zeros_like(fk) + i = xp.isfinite(b) + if xp.any(i): # better not call `f` with empty arrays + fb[i] = f(b[i], *[arg[i] for arg in args]) + nfev = nfev + xp.asarray(i, dtype=left_nfev.dtype) + + if log: + log_step = xp.log(step) + S_terms = (left, right.integral - log_step, fk - log2, fb - log2) + S = special.logsumexp(xp.stack(S_terms), axis=0) + E_terms = (left_error, right.error - log_step, fk-log2, fb-log2+xp.pi*1j) + E = xp_real(special.logsumexp(xp.stack(E_terms), axis=0)) + else: + S = left + right.integral/step + fk/2 + fb/2 + E = left_error + right.error/step + fk/2 - fb/2 + status[~i_skip] = right.status[~i_skip] + + status[(status == 0) & i_fk_insufficient] = -4 + return S, E, status, left_nfev + right.nfev + nfev + lb.nfev diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fbe799fa8bfe4c5f1b2d2ed5edc07fe91db628ef Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_multivariate.cpython-310-x86_64-linux-gnu.so differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9556604cd4abef66866a73cf02c8c2f49332bfb0 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71ffdbfaa16726aa2246c2ce5c98caea7d35847d89e13b2c4a30ee36fc19e2fd +size 516577 diff --git 
a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..88e7199978c0242b703101d3bdf35ef76b98646e --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad5130210f8433c3deb702ddebf7eb942cf341e0c79301db404e665fb6285e5b +size 565969 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/dop.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/dop.py new file mode 100644 index 0000000000000000000000000000000000000000..bf67a9a35b7d2959c2617aadc5638b577a45b9b5 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/dop.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="dop", + private_modules=["_dop"], all=__all__, + attribute=name) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/lsoda.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/lsoda.py new file mode 100644 index 0000000000000000000000000000000000000000..1bc1f1da3c4f0aefad9da73b6405b957ce9335b4 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/lsoda.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['lsoda'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="lsoda", + private_modules=["_lsoda"], all=__all__, + attribute=name) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/odepack.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/odepack.py new file mode 100644 index 0000000000000000000000000000000000000000..7bb4c1a8c9be375df855abe6e1b30ca9711f2607 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/odepack.py @@ -0,0 +1,17 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.integrate` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['odeint', 'ODEintWarning'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="odepack", + private_modules=["_odepack_py"], all=__all__, + attribute=name) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/quadpack.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/quadpack.py new file mode 100644 index 0000000000000000000000000000000000000000..144584988095c8855da8c34253c045f1a3940572 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/quadpack.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.integrate` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + "quad", + "dblquad", + "tplquad", + "nquad", + "IntegrationWarning", +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="quadpack", + private_modules=["_quadpack_py"], all=__all__, + attribute=name) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcaebe490e9dce2e1d0ea1e39e1d973b683188cd Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a726e153385b8d06e8f78b088aff533426d7980d Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0312d945bd4fea236d8eb4ad9b90cacf6d4be7f Binary files /dev/null and 
b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_banded_ode_solvers.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..490672d68e925fde4535f6202ff8eb5688251330 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_bvp.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfc4f6ae22e79b5cdfcb587dccb7f5e1b7e2f1c6 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_cubature.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c22092211e3378e8e76fbb375ae2cb214d153e8 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_integrate.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b365126a0696902337a630b4f4dbaf16c8ef09ad Binary files /dev/null and 
b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fab2574728e330b31b6fddf94834e5ae57e17d2 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadpack.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b94611e82117c6390de8cb74ab408c52e881e721 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99dc8e7e7fe376d8e8bf007f9bffbed770810e53 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_tanhsinh.cpython-310.pyc differ diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py new file mode 100644 index 0000000000000000000000000000000000000000..851d28f5671c3eb5821a7379547c1ba66a7e1340 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py @@ -0,0 +1,217 @@ +import pytest + 
+import numpy as np +from numpy.testing import assert_allclose + +from scipy.integrate import quad_vec + +from multiprocessing.dummy import Pool + + +quadrature_params = pytest.mark.parametrize( + 'quadrature', [None, "gk15", "gk21", "trapezoid"]) + + +@quadrature_params +def test_quad_vec_simple(quadrature): + n = np.arange(10) + def f(x): + return x ** n + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(epsabs=epsabs, quadrature=quadrature) + + exact = 2**(n+1)/(n + 1) + + res, err = quad_vec(f, 0, 2, norm='max', **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err = quad_vec(f, 0, 2, norm='2', **kwargs) + assert np.linalg.norm(res - exact) < epsabs + + res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err, *rest = quad_vec(f, 0, 2, norm='max', + epsrel=1e-8, + full_output=True, + limit=10000, + **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + +@quadrature_params +def test_quad_vec_simple_inf(quadrature): + def f(x): + return 1 / (1 + np.float64(x) ** 2) + + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature) + + res, err = quad_vec(f, 0, np.inf, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, -np.inf, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, 0, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, 0, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, np.inf, **kwargs) + assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, -np.inf, **kwargs) + assert_allclose(res, -np.pi, 
rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, -np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + def f(x): + return np.sin(x + 2) / (1 + x ** 2) + exact = np.pi / np.e * np.sin(2) + epsabs = 1e-5 + + res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs, + quadrature=quadrature, full_output=True) + assert info.status == 1 + assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err)) + + +def test_quad_vec_args(): + def f(x, a): + return x * (x + a) * np.arange(3) + a = 2 + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=(a,)) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +def _lorenzian(x): + return 1 / (1 + x**2) + + +@pytest.mark.fail_slow(10) +def test_quad_vec_pool(): + f = _lorenzian + res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + with Pool(10) as pool: + def f(x): + return 1 / (1 + x ** 2) + res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + +def _func_with_args(x, a): + return x * (x + a) * np.arange(3) + + +@pytest.mark.fail_slow(10) +@pytest.mark.parametrize('extra_args', [2, (2,)]) +@pytest.mark.parametrize('workers', [1, 10]) +def test_quad_vec_pool_args(extra_args, workers): + f = _func_with_args + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + with Pool(workers) as pool: + res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +@quadrature_params +def test_num_eval(quadrature): 
+ def f(x): + count[0] += 1 + return x**5 + + count = [0] + res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature) + assert res[2].neval == count[0] + + +def test_info(): + def f(x): + return np.ones((3, 2, 1)) + + res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True) + + assert info.success is True + assert info.status == 0 + assert info.message == 'Target precision reached.' + assert info.neval > 0 + assert info.intervals.shape[1] == 2 + assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1) + assert info.errors.shape == (info.intervals.shape[0],) + + +def test_nan_inf(): + def f_nan(x): + return np.nan + + def f_inf(x): + return np.inf if x < 0.1 else 1/x + + res, err, info = quad_vec(f_nan, 0, 1, full_output=True) + assert info.status == 3 + + res, err, info = quad_vec(f_inf, 0, 1, full_output=True) + assert info.status == 3 + + +@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0), + (-np.inf, np.inf), (np.inf, -np.inf)]) +def test_points(a, b): + # Check that initial interval splitting is done according to + # `points`, by checking that consecutive sets of 15 point (for + # gk15) function evaluations lie between `points` + + points = (0, 0.25, 0.5, 0.75, 1.0) + points += tuple(-x for x in points) + + quadrature_points = 15 + interval_sets = [] + count = 0 + + def f(x): + nonlocal count + + if count % quadrature_points == 0: + interval_sets.append(set()) + + count += 1 + interval_sets[-1].add(float(x)) + return 0.0 + + quad_vec(f, a, b, points=points, quadrature='gk15', limit=0) + + # Check that all point sets lie in a single `points` interval + for p in interval_sets: + j = np.searchsorted(sorted(points), tuple(p)) + assert np.all(j == j[0]) + + +@pytest.mark.thread_unsafe +def test_trapz_deprecation(): + with pytest.deprecated_call(match="`quadrature='trapz'`"): + quad_vec(lambda x: x, 0, 1, quadrature="trapz") diff --git 
a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py new file mode 100644 index 0000000000000000000000000000000000000000..358c5e3d1fcfe7ccd7e3691bd9af2f47656f4e2b --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py @@ -0,0 +1,220 @@ +import itertools +import pytest +import numpy as np +from numpy.testing import assert_allclose +from scipy.integrate import ode + + +def _band_count(a): + """Returns ml and mu, the lower and upper band sizes of a.""" + nrows, ncols = a.shape + ml = 0 + for k in range(-nrows+1, 0): + if np.diag(a, k).any(): + ml = -k + break + mu = 0 + for k in range(nrows-1, 0, -1): + if np.diag(a, k).any(): + mu = k + break + return ml, mu + + +def _linear_func(t, y, a): + """Linear system dy/dt = a * y""" + return a.dot(y) + + +def _linear_jac(t, y, a): + """Jacobian of a * y is a.""" + return a + + +def _linear_banded_jac(t, y, a): + """Banded Jacobian.""" + ml, mu = _band_count(a) + bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] + bjac.append(np.diag(a)) + for k in range(-1, -ml-1, -1): + bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) + return bjac + + +def _solve_linear_sys(a, y0, tend=1, dt=0.1, + solver=None, method='bdf', use_jac=True, + with_jacobian=False, banded=False): + """Use scipy.integrate.ode to solve a linear system of ODEs. + + a : square ndarray + Matrix of the linear system to be solved. + y0 : ndarray + Initial condition + tend : float + Stop time. + dt : float + Step size of the output. + solver : str + If not None, this must be "vode", "lsoda" or "zvode". + method : str + Either "bdf" or "adams". + use_jac : bool + Determines if the jacobian function is passed to ode(). + with_jacobian : bool + Passed to ode.set_integrator(). + banded : bool + Determines whether a banded or full jacobian is used. 
+ If `banded` is True, `lband` and `uband` are determined by the + values in `a`. + """ + if banded: + lband, uband = _band_count(a) + else: + lband = None + uband = None + + if use_jac: + if banded: + r = ode(_linear_func, _linear_banded_jac) + else: + r = ode(_linear_func, _linear_jac) + else: + r = ode(_linear_func) + + if solver is None: + if np.iscomplexobj(a): + solver = "zvode" + else: + solver = "vode" + + r.set_integrator(solver, + with_jacobian=with_jacobian, + method=method, + lband=lband, uband=uband, + rtol=1e-9, atol=1e-10, + ) + t0 = 0 + r.set_initial_value(y0, t0) + r.set_f_params(a) + r.set_jac_params(a) + + t = [t0] + y = [y0] + while r.successful() and r.t < tend: + r.integrate(r.t + dt) + t.append(r.t) + y.append(r.y) + + t = np.array(t) + y = np.array(y) + return t, y + + +def _analytical_solution(a, y0, t): + """ + Analytical solution to the linear differential equations dy/dt = a*y. + + The solution is only valid if `a` is diagonalizable. + + Returns a 2-D array with shape (len(t), len(y0)). + """ + lam, v = np.linalg.eig(a) + c = np.linalg.solve(v, y0) + e = c * np.exp(lam * t.reshape(-1, 1)) + sol = e.dot(v.T) + return sol + + +@pytest.mark.thread_unsafe +def test_banded_ode_solvers(): + # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class + # with a system that has a banded Jacobian matrix. 
+ + t_exact = np.linspace(0, 1.0, 5) + + # --- Real arrays for testing the "lsoda" and "vode" solvers --- + + # lband = 2, uband = 1: + a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], + [0.2, -0.5, 0.9, 0.0, 0.0], + [0.1, 0.1, -0.4, 0.1, 0.0], + [0.0, 0.3, -0.1, -0.9, -0.3], + [0.0, 0.0, 0.1, 0.1, -0.7]]) + + # lband = 0, uband = 1: + a_real_upper = np.triu(a_real) + + # lband = 2, uband = 0: + a_real_lower = np.tril(a_real) + + # lband = 0, uband = 0: + a_real_diag = np.triu(a_real_lower) + + real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] + real_solutions = [] + + for a in real_matrices: + y0 = np.arange(1, a.shape[0] + 1) + y_exact = _analytical_solution(a, y0, t_exact) + real_solutions.append((y0, t_exact, y_exact)) + + def check_real(idx, solver, meth, use_jac, with_jac, banded): + a = real_matrices[idx] + y0, t_exact, y_exact = real_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(real_matrices)): + p = [['vode', 'lsoda'], # solver + ['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for solver, meth, use_jac, with_jac, banded in itertools.product(*p): + check_real(idx, solver, meth, use_jac, with_jac, banded) + + # --- Complex arrays for testing the "zvode" solver --- + + # complex, lband = 2, uband = 1: + a_complex = a_real - 0.5j * a_real + + # complex, lband = 0, uband = 0: + a_complex_diag = np.diag(np.diag(a_complex)) + + complex_matrices = [a_complex, a_complex_diag] + complex_solutions = [] + + for a in complex_matrices: + y0 = np.arange(1, a.shape[0] + 1) + 1j + y_exact = _analytical_solution(a, y0, t_exact) + complex_solutions.append((y0, t_exact, y_exact)) + + def check_complex(idx, solver, meth, use_jac, with_jac, banded): + a = 
complex_matrices[idx] + y0, t_exact, y_exact = complex_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(complex_matrices)): + p = [['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for meth, use_jac, with_jac, banded in itertools.product(*p): + check_complex(idx, "zvode", meth, use_jac, with_jac, banded) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py new file mode 100644 index 0000000000000000000000000000000000000000..4ef9eb6ff0502e1113d6bea7ad1e0088633d3151 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_bvp.py @@ -0,0 +1,714 @@ +import sys + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import numpy as np +from numpy.testing import (assert_, assert_array_equal, assert_allclose, + assert_equal) +from pytest import raises as assert_raises + +from scipy.sparse import coo_matrix +from scipy.special import erf +from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac, + estimate_bc_jac, compute_jac_indices, + construct_global_jac, solve_bvp) + +import pytest + + +def exp_fun(x, y): + return np.vstack((y[1], y[0])) + + +def exp_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = 1 + df_dy[1, 1] = 0 + return df_dy + + +def exp_bc(ya, yb): + return np.hstack((ya[0] - 1, yb[0])) + + +def exp_bc_complex(ya, yb): + return np.hstack((ya[0] - 1 - 1j, yb[0])) + + +def exp_bc_jac(ya, yb): + dbc_dya = np.array([ + [1, 0], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def exp_sol(x): 
+ return (np.exp(-x) - np.exp(x - 2)) / (1 - np.exp(-2)) + + +def sl_fun(x, y, p): + return np.vstack((y[1], -p[0]**2 * y[0])) + + +def sl_fun_jac(x, y, p): + n, m = y.shape + df_dy = np.empty((n, 2, m)) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -p[0]**2 + df_dy[1, 1] = 0 + + df_dp = np.empty((n, 1, m)) + df_dp[0, 0] = 0 + df_dp[1, 0] = -2 * p[0] * y[0] + + return df_dy, df_dp + + +def sl_bc(ya, yb, p): + return np.hstack((ya[0], yb[0], ya[1] - p[0])) + + +def sl_bc_jac(ya, yb, p): + dbc_dya = np.zeros((3, 2)) + dbc_dya[0, 0] = 1 + dbc_dya[2, 1] = 1 + + dbc_dyb = np.zeros((3, 2)) + dbc_dyb[1, 0] = 1 + + dbc_dp = np.zeros((3, 1)) + dbc_dp[2, 0] = -1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def sl_sol(x, p): + return np.sin(p[0] * x) + + +def emden_fun(x, y): + return np.vstack((y[1], -y[0]**5)) + + +def emden_fun_jac(x, y): + df_dy = np.empty((2, 2, x.shape[0])) + df_dy[0, 0] = 0 + df_dy[0, 1] = 1 + df_dy[1, 0] = -5 * y[0]**4 + df_dy[1, 1] = 0 + return df_dy + + +def emden_bc(ya, yb): + return np.array([ya[1], yb[0] - (3/4)**0.5]) + + +def emden_bc_jac(ya, yb): + dbc_dya = np.array([ + [0, 1], + [0, 0] + ]) + dbc_dyb = np.array([ + [0, 0], + [1, 0] + ]) + return dbc_dya, dbc_dyb + + +def emden_sol(x): + return (1 + x**2/3)**-0.5 + + +def undefined_fun(x, y): + return np.zeros_like(y) + + +def undefined_bc(ya, yb): + return np.array([ya[0], yb[0] - 1]) + + +def big_fun(x, y): + f = np.zeros_like(y) + f[::2] = y[1::2] + return f + + +def big_bc(ya, yb): + return np.hstack((ya[::2], yb[::2] - 1)) + + +def big_sol(x, n): + y = np.ones((2 * n, x.size)) + y[::2] = x + return x + + +def big_fun_with_parameters(x, y, p): + """ Big version of sl_fun, with two parameters. + + The two differential equations represented by sl_fun are broadcast to the + number of rows of y, rotating between the parameters p[0] and p[1]. 
+ Here are the differential equations: + + dy[0]/dt = y[1] + dy[1]/dt = -p[0]**2 * y[0] + dy[2]/dt = y[3] + dy[3]/dt = -p[1]**2 * y[2] + dy[4]/dt = y[5] + dy[5]/dt = -p[0]**2 * y[4] + dy[6]/dt = y[7] + dy[7]/dt = -p[1]**2 * y[6] + . + . + . + + """ + f = np.zeros_like(y) + f[::2] = y[1::2] + f[1::4] = -p[0]**2 * y[::4] + f[3::4] = -p[1]**2 * y[2::4] + return f + + +def big_fun_with_parameters_jac(x, y, p): + # big version of sl_fun_jac, with two parameters + n, m = y.shape + df_dy = np.zeros((n, n, m)) + df_dy[range(0, n, 2), range(1, n, 2)] = 1 + df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2 + df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2 + + df_dp = np.zeros((n, 2, m)) + df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)] + df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)] + + return df_dy, df_dp + + +def big_bc_with_parameters(ya, yb, p): + # big version of sl_bc, with two parameters + return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1])) + + +def big_bc_with_parameters_jac(ya, yb, p): + # big version of sl_bc_jac, with two parameters + n = ya.shape[0] + dbc_dya = np.zeros((n + 2, n)) + dbc_dyb = np.zeros((n + 2, n)) + + dbc_dya[range(n // 2), range(0, n, 2)] = 1 + dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1 + + dbc_dp = np.zeros((n + 2, 2)) + dbc_dp[n, 0] = -1 + dbc_dya[n, 1] = 1 + dbc_dp[n + 1, 1] = -1 + dbc_dya[n + 1, 3] = 1 + + return dbc_dya, dbc_dyb, dbc_dp + + +def big_sol_with_parameters(x, p): + # big version of sl_sol, with two parameters + return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x))) + + +def shock_fun(x, y): + eps = 1e-3 + return np.vstack(( + y[1], + -(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) + + np.pi * x * np.sin(np.pi * x)) / eps + )) + + +def shock_bc(ya, yb): + return np.array([ya[0] + 2, yb[0]]) + + +def shock_sol(x): + eps = 1e-3 + k = np.sqrt(2 * eps) + return np.cos(np.pi * x) + erf(x / k) / erf(1 / k) + + +def nonlin_bc_fun(x, y): + # laplace eq. 
+ return np.stack([y[1], np.zeros_like(x)]) + + +def nonlin_bc_bc(ya, yb): + phiA, phipA = ya + phiC, phipC = yb + + kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9 + + # Butler-Volmer Kinetics at Anode + hA = 0.0-phiA-0.0 + iA = ioA * (np.exp(f*hA) - np.exp(-f*hA)) + res0 = iA + kappa * phipA + + # Butler-Volmer Kinetics at Cathode + hC = V - phiC - 1.0 + iC = ioC * (np.exp(f*hC) - np.exp(-f*hC)) + res1 = iC - kappa*phipC + + return np.array([res0, res1]) + + +def nonlin_bc_sol(x): + return -0.13426436116763119 - 1.1308709 * x + + +def test_modify_mesh(): + x = np.array([0, 1, 3, 9], dtype=float) + x_new = modify_mesh(x, np.array([0]), np.array([2])) + assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9])) + + x = np.array([-6, -3, 0, 3, 6], dtype=float) + x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3])) + assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6]) + + +def test_compute_fun_jac(): + x = np.linspace(0, 1, 5) + y = np.empty((2, x.shape[0])) + y[0] = 0.01 + y[1] = 0.02 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p) + df_dy_an = exp_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + x = np.linspace(0, np.pi, 5) + y = np.empty((2, x.shape[0])) + y[0] = np.sin(x) + y[1] = np.cos(x) + p = np.array([1.0]) + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_an, df_dp_an = sl_fun_jac(x, y, p) + assert_allclose(df_dy, df_dy_an) + assert_allclose(df_dp, df_dp_an) + + x = np.linspace(0, 1, 10) + y = np.empty((2, x.shape[0])) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + p = np.array([]) + df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p) + df_dy_an = emden_fun_jac(x, y) + assert_allclose(df_dy, df_dy_an) + assert_(df_dp is None) + + +def test_compute_bc_jac(): + ya = np.array([-1.0, 2]) + yb = np.array([0.5, 3]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p) + 
dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + ya = np.array([0.0, 1]) + yb = np.array([0.0, -1]) + p = np.array([0.5]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p) + dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_allclose(dbc_dp, dbc_dp_an) + + ya = np.array([0.5, 100]) + yb = np.array([-1000, 10.5]) + p = np.array([]) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac( + lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p) + dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb) + assert_allclose(dbc_dya, dbc_dya_an) + assert_allclose(dbc_dyb, dbc_dyb_an) + assert_(dbc_dp is None) + + +def test_compute_jac_indices(): + n = 2 + m = 4 + k = 2 + i, j = compute_jac_indices(n, m, k) + s = coo_matrix((np.ones_like(i), (i, j))).toarray() + s_true = np.array([ + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [1, 1, 1, 1, 0, 0, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 1, 1, 1, 1, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 0, 0, 0, 1, 1, 1, 1], + ]) + assert_array_equal(s, s_true) + + +def test_compute_global_jac(): + n = 2 + m = 5 + k = 1 + i_jac, j_jac = compute_jac_indices(2, 5, 1) + x = np.linspace(0, 1, 5) + h = np.diff(x) + y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x))) + p = np.array([3.0]) + + f = sl_fun(x, y, p) + + x_middle = x[:-1] + 0.5 * h + y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1]) + + df_dy, df_dp = sl_fun_jac(x, y, p) + df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p) + + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J 
= J.toarray() + + def J_block(h, p): + return np.array([ + [h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h], + [0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12] + ]) + + J_true = np.zeros((m * n + k, m * n + k)) + for i in range(m - 1): + J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0]) + + J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:]) + J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) + + h**2/6 * (y[1, :-1] - y[1, 1:])) + + J_true[8, 0] = 1 + J_true[9, 8] = 1 + J_true[10, 1] = 1 + J_true[10, 10] = -1 + + assert_allclose(J, J_true, rtol=1e-10) + + df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p) + df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p) + dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p) + J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, + df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp) + J = J.toarray() + assert_allclose(J, J_true, rtol=2e-8, atol=2e-8) + + +def test_parameter_validation(): + x = [0, 1, 0.5] + y = np.zeros((2, 3)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, 4)) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y) + + def fun(x, y, p): + return exp_fun(x, y) + def bc(ya, yb, p): + return exp_bc(ya, yb) + + y = np.zeros((2, x.shape[0])) + assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1]) + + def wrong_shape_fun(x, y): + return np.zeros(3) + + assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y) + + S = np.array([[0, 0]]) + assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S) + + +def test_no_params(): + x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0])) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + 
assert_equal(sol.x.size, 5) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res**2, axis=0)**0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_with_params(): + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2, x.shape[0])) + + for fun_jac in [None, sl_fun_jac]: + for bc_jac in [None, sl_bc_jac]: + sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 10) + + assert_allclose(sol.p, [1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0], sl_sol(x_test, [1]), + rtol=1e-4, atol=1e-4) + + f_test = sl_fun(x_test, sol_test, [1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_singular_term(): + x = np.linspace(0, 1, 10) + x_test = np.linspace(0.05, 1, 100) + y = np.empty((2, 10)) + y[0] = (3/4)**0.5 + y[1] = 1e-4 + S = np.array([[0, 0], [0, -2]]) + + for fun_jac in [None, emden_fun_jac]: + for bc_jac in [None, emden_bc_jac]: + sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_equal(sol.x.size, 10) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5) + + f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test + r = 
sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_complex(): + # The test is essentially the same as test_no_params, but boundary + # conditions are turned into complex. + x = np.linspace(0, 1, 5) + x_test = np.linspace(0, 1, 100) + y = np.zeros((2, x.shape[0]), dtype=complex) + for fun_jac in [None, exp_fun_jac]: + for bc_jac in [None, exp_bc_jac]: + sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac, + bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x_test) + + assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5) + assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5) + + f_test = exp_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), + axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_failures(): + x = np.linspace(0, 1, 2) + y = np.zeros((2, x.size)) + res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5) + assert_equal(res.status, 1) + assert_(not res.success) + + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.size)) + res = solve_bvp(undefined_fun, undefined_bc, x, y) + assert_equal(res.status, 2) + assert_(not res.success) + + +def test_big_problem(): + n = 30 + x = np.linspace(0, 1, 5) + y = np.zeros((2 * n, x.size)) + sol = solve_bvp(big_fun, big_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + sol_test = sol.sol(x) + + assert_allclose(sol_test[0], big_sol(x, n)) + + f_test = big_fun(x, sol_test) + r = sol.sol(x, 1) - f_test + 
rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_big_problem_with_parameters(): + n = 30 + x = np.linspace(0, np.pi, 5) + x_test = np.linspace(0, np.pi, 100) + y = np.ones((2 * n, x.size)) + + for fun_jac in [None, big_fun_with_parameters_jac]: + for bc_jac in [None, big_bc_with_parameters_jac]: + sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x, + y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_allclose(sol.p, [1, 1], rtol=1e-4) + + sol_test = sol.sol(x_test) + + for isol in range(0, n, 4): + assert_allclose(sol_test[isol], + big_sol_with_parameters(x_test, [1, 1])[0], + rtol=1e-4, atol=1e-4) + assert_allclose(sol_test[isol + 2], + big_sol_with_parameters(x_test, [1, 1])[1], + rtol=1e-4, atol=1e-4) + + f_test = big_fun_with_parameters(x_test, sol_test, [1, 1]) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + assert_(np.all(norm_res < 1e-3)) + + assert_(np.all(sol.rms_residuals < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_shock_layer(): + x = np.linspace(-1, 1, 5) + x_test = np.linspace(-1, 1, 100) + y = np.zeros((2, x.size)) + sol = solve_bvp(shock_fun, shock_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 110) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = shock_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + 
assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +def test_nonlin_bc(): + x = np.linspace(0, 0.1, 5) + x_test = x + y = np.zeros([2, x.size]) + sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y) + + assert_equal(sol.status, 0) + assert_(sol.success) + + assert_(sol.x.size < 8) + + sol_test = sol.sol(x_test) + assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5) + + f_test = nonlin_bc_fun(x_test, sol_test) + r = sol.sol(x_test, 1) - f_test + rel_res = r / (1 + np.abs(f_test)) + norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5 + + assert_(np.all(norm_res < 1e-3)) + assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10) + assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10) + + +@pytest.mark.thread_unsafe +def test_verbose(): + # Smoke test that checks the printing does something and does not crash + x = np.linspace(0, 1, 5) + y = np.zeros((2, x.shape[0])) + for verbose in [0, 1, 2]: + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose) + text = sys.stdout.getvalue() + finally: + sys.stdout = old_stdout + + assert_(sol.success) + if verbose == 0: + assert_(not text, text) + if verbose >= 1: + assert_("Solved in" in text, text) + if verbose >= 2: + assert_("Max residual" in text, text) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_cubature.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_cubature.py new file mode 100644 index 0000000000000000000000000000000000000000..899655c7631fbc86d06eb97c514761d4c882a632 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_cubature.py @@ -0,0 +1,1389 @@ +import math +import scipy +import itertools + +import pytest + +from scipy._lib._array_api import ( + array_namespace, + xp_assert_close, + xp_size, + np_compat, 
+ is_array_api_strict, +) +from scipy.conftest import array_api_compatible + +from scipy.integrate import cubature + +from scipy.integrate._rules import ( + Rule, FixedRule, + NestedFixedRule, + GaussLegendreQuadrature, GaussKronrodQuadrature, + GenzMalikCubature, +) + +from scipy.integrate._cubature import _InfiniteLimitsTransform + +pytestmark = [pytest.mark.usefixtures("skip_xp_backends"),] +skip_xp_backends = pytest.mark.skip_xp_backends + +# The integrands ``genz_malik_1980_*`` come from the paper: +# A.C. Genz, A.A. Malik, Remarks on algorithm 006: An adaptive algorithm for +# numerical integration over an N-dimensional rectangular region, Journal of +# Computational and Applied Mathematics, Volume 6, Issue 4, 1980, Pages 295-302, +# ISSN 0377-0427, https://doi.org/10.1016/0771-050X(80)90039-X. + + +def basic_1d_integrand(x, n, xp): + x_reshaped = xp.reshape(x, (-1, 1, 1)) + n_reshaped = xp.reshape(n, (1, -1, 1)) + + return x_reshaped**n_reshaped + + +def basic_1d_integrand_exact(n, xp): + # Exact only for integration over interval [0, 2]. + return xp.reshape(2**(n+1)/(n+1), (-1, 1)) + + +def basic_nd_integrand(x, n, xp): + return xp.reshape(xp.sum(x, axis=-1), (-1, 1))**xp.reshape(n, (1, -1)) + + +def basic_nd_integrand_exact(n, xp): + # Exact only for integration over interval [0, 2]. + return (-2**(3+n) + 4**(2+n))/((1+n)*(2+n)) + + +def genz_malik_1980_f_1(x, r, alphas, xp): + r""" + .. math:: f_1(\mathbf x) = \cos\left(2\pi r + \sum^n_{i = 1}\alpha_i x_i\right) + + .. code-block:: mathematica + + genzMalik1980f1[x_List, r_, alphas_List] := Cos[2*Pi*r + Total[x*alphas]] + """ + + npoints, ndim = x.shape[0], x.shape[-1] + + alphas_reshaped = alphas[None, ...] 
+ x_reshaped = xp.reshape(x, (npoints, *([1]*(len(alphas.shape) - 1)), ndim)) + + return xp.cos(2*math.pi*r + xp.sum(alphas_reshaped * x_reshaped, axis=-1)) + + +def genz_malik_1980_f_1_exact(a, b, r, alphas, xp): + ndim = xp_size(a) + a = xp.reshape(a, (*([1]*(len(alphas.shape) - 1)), ndim)) + b = xp.reshape(b, (*([1]*(len(alphas.shape) - 1)), ndim)) + + return ( + (-2)**ndim + * 1/xp.prod(alphas, axis=-1) + * xp.cos(2*math.pi*r + xp.sum(alphas * (a+b) * 0.5, axis=-1)) + * xp.prod(xp.sin(alphas * (a-b)/2), axis=-1) + ) + + +def genz_malik_1980_f_1_random_args(rng, shape, xp): + r = xp.asarray(rng.random(shape[:-1])) + alphas = xp.asarray(rng.random(shape)) + + difficulty = 9 + normalisation_factors = xp.sum(alphas, axis=-1)[..., None] + alphas = difficulty * alphas / normalisation_factors + + return (r, alphas) + + +def genz_malik_1980_f_2(x, alphas, betas, xp): + r""" + .. math:: f_2(\mathbf x) = \prod^n_{i = 1} (\alpha_i^2 + (x_i - \beta_i)^2)^{-1} + + .. code-block:: mathematica + + genzMalik1980f2[x_List, alphas_List, betas_List] := + 1/Times @@ ((alphas^2 + (x - betas)^2)) + """ + npoints, ndim = x.shape[0], x.shape[-1] + + alphas_reshaped = alphas[None, ...] + betas_reshaped = betas[None, ...] + + x_reshaped = xp.reshape(x, (npoints, *([1]*(len(alphas.shape) - 1)), ndim)) + + return 1/xp.prod(alphas_reshaped**2 + (x_reshaped-betas_reshaped)**2, axis=-1) + + +def genz_malik_1980_f_2_exact(a, b, alphas, betas, xp): + ndim = xp_size(a) + a = xp.reshape(a, (*([1]*(len(alphas.shape) - 1)), ndim)) + b = xp.reshape(b, (*([1]*(len(alphas.shape) - 1)), ndim)) + + # `xp` is the unwrapped namespace, so `.atan` won't work for `xp = np` and np<2. 
+ xp_test = array_namespace(a) + + return ( + (-1)**ndim * 1/xp.prod(alphas, axis=-1) + * xp.prod( + xp_test.atan((a - betas)/alphas) - xp_test.atan((b - betas)/alphas), + axis=-1, + ) + ) + + +def genz_malik_1980_f_2_random_args(rng, shape, xp): + ndim = shape[-1] + alphas = xp.asarray(rng.random(shape)) + betas = xp.asarray(rng.random(shape)) + + difficulty = 25.0 + products = xp.prod(alphas**xp.asarray(-2.0), axis=-1) + normalisation_factors = (products**xp.asarray(1 / (2*ndim)))[..., None] + alphas = alphas * normalisation_factors * math.pow(difficulty, 1 / (2*ndim)) + + # Adjust alphas from distribution used in Genz and Malik 1980 since denominator + # is very small for high dimensions. + alphas *= 10 + + return alphas, betas + + +def genz_malik_1980_f_3(x, alphas, xp): + r""" + .. math:: f_3(\mathbf x) = \exp\left(\sum^n_{i = 1} \alpha_i x_i\right) + + .. code-block:: mathematica + + genzMalik1980f3[x_List, alphas_List] := Exp[Dot[x, alphas]] + """ + + npoints, ndim = x.shape[0], x.shape[-1] + + alphas_reshaped = alphas[None, ...] + x_reshaped = xp.reshape(x, (npoints, *([1]*(len(alphas.shape) - 1)), ndim)) + + return xp.exp(xp.sum(alphas_reshaped * x_reshaped, axis=-1)) + + +def genz_malik_1980_f_3_exact(a, b, alphas, xp): + ndim = xp_size(a) + a = xp.reshape(a, (*([1]*(len(alphas.shape) - 1)), ndim)) + b = xp.reshape(b, (*([1]*(len(alphas.shape) - 1)), ndim)) + + return ( + (-1)**ndim * 1/xp.prod(alphas, axis=-1) + * xp.prod(xp.exp(alphas * a) - xp.exp(alphas * b), axis=-1) + ) + + +def genz_malik_1980_f_3_random_args(rng, shape, xp): + alphas = xp.asarray(rng.random(shape)) + normalisation_factors = xp.sum(alphas, axis=-1)[..., None] + difficulty = 12.0 + alphas = difficulty * alphas / normalisation_factors + + return (alphas,) + + +def genz_malik_1980_f_4(x, alphas, xp): + r""" + .. math:: f_4(\mathbf x) = \left(1 + \sum^n_{i = 1} \alpha_i x_i\right)^{-n-1} + + .. 
code-block:: mathematica + genzMalik1980f4[x_List, alphas_List] := + (1 + Dot[x, alphas])^(-Length[alphas] - 1) + """ + + npoints, ndim = x.shape[0], x.shape[-1] + + alphas_reshaped = alphas[None, ...] + x_reshaped = xp.reshape(x, (npoints, *([1]*(len(alphas.shape) - 1)), ndim)) + + return (1 + xp.sum(alphas_reshaped * x_reshaped, axis=-1))**(-ndim-1) + + +def genz_malik_1980_f_4_exact(a, b, alphas, xp): + ndim = xp_size(a) + + def F(x): + x_reshaped = xp.reshape(x, (*([1]*(len(alphas.shape) - 1)), ndim)) + + return ( + (-1)**ndim/xp.prod(alphas, axis=-1) + / math.factorial(ndim) + / (1 + xp.sum(alphas * x_reshaped, axis=-1)) + ) + + return _eval_indefinite_integral(F, a, b, xp) + + +def _eval_indefinite_integral(F, a, b, xp): + """ + Calculates a definite integral from points `a` to `b` by summing up over the corners + of the corresponding hyperrectangle. + """ + + ndim = xp_size(a) + points = xp.stack([a, b], axis=0) + + out = 0 + for ind in itertools.product(range(2), repeat=ndim): + selected_points = xp.asarray([points[i, j] for i, j in zip(ind, range(ndim))]) + out += pow(-1, sum(ind) + ndim) * F(selected_points) + + return out + + +def genz_malik_1980_f_4_random_args(rng, shape, xp): + ndim = shape[-1] + + alphas = xp.asarray(rng.random(shape)) + normalisation_factors = xp.sum(alphas, axis=-1)[..., None] + difficulty = 14.0 + alphas = (difficulty / ndim) * alphas / normalisation_factors + + return (alphas,) + + +def genz_malik_1980_f_5(x, alphas, betas, xp): + r""" + .. math:: + + f_5(\mathbf x) = \exp\left(-\sum^n_{i = 1} \alpha^2_i (x_i - \beta_i)^2\right) + + .. code-block:: mathematica + + genzMalik1980f5[x_List, alphas_List, betas_List] := + Exp[-Total[alphas^2 * (x - betas)^2]] + """ + + npoints, ndim = x.shape[0], x.shape[-1] + + alphas_reshaped = alphas[None, ...] + betas_reshaped = betas[None, ...] 
+ + x_reshaped = xp.reshape(x, (npoints, *([1]*(len(alphas.shape) - 1)), ndim)) + + return xp.exp( + -xp.sum(alphas_reshaped**2 * (x_reshaped - betas_reshaped)**2, axis=-1) + ) + + +def genz_malik_1980_f_5_exact(a, b, alphas, betas, xp): + ndim = xp_size(a) + a = xp.reshape(a, (*([1]*(len(alphas.shape) - 1)), ndim)) + b = xp.reshape(b, (*([1]*(len(alphas.shape) - 1)), ndim)) + + return ( + (1/2)**ndim + * 1/xp.prod(alphas, axis=-1) + * (math.pi**(ndim/2)) + * xp.prod( + scipy.special.erf(alphas * (betas - a)) + + scipy.special.erf(alphas * (b - betas)), + axis=-1, + ) + ) + + +def genz_malik_1980_f_5_random_args(rng, shape, xp): + alphas = xp.asarray(rng.random(shape)) + betas = xp.asarray(rng.random(shape)) + + difficulty = 21.0 + normalisation_factors = xp.sqrt(xp.sum(alphas**xp.asarray(2.0), axis=-1))[..., None] + alphas = alphas / normalisation_factors * math.sqrt(difficulty) + + return alphas, betas + + +def f_gaussian(x, alphas, xp): + r""" + .. math:: + + f(\mathbf x) = \exp\left(-\sum^n_{i = 1} (\alpha_i x_i)^2 \right) + """ + npoints, ndim = x.shape[0], x.shape[-1] + alphas_reshaped = alphas[None, ...] + x_reshaped = xp.reshape(x, (npoints, *([1]*(len(alphas.shape) - 1)), ndim)) + + return xp.exp(-xp.sum((alphas_reshaped * x_reshaped)**2, axis=-1)) + + +def f_gaussian_exact(a, b, alphas, xp): + # Exact only when `a` and `b` are one of: + # (-oo, oo), or + # (0, oo), or + # (-oo, 0) + # `alphas` can be arbitrary. 
+ + ndim = xp_size(a) + double_infinite_count = 0 + semi_infinite_count = 0 + + for i in range(ndim): + if xp.isinf(a[i]) and xp.isinf(b[i]): # doubly-infinite + double_infinite_count += 1 + elif xp.isinf(a[i]) != xp.isinf(b[i]): # exclusive or, so semi-infinite + semi_infinite_count += 1 + + return (math.sqrt(math.pi) ** ndim) / ( + 2**semi_infinite_count * xp.prod(alphas, axis=-1) + ) + + +def f_gaussian_random_args(rng, shape, xp): + alphas = xp.asarray(rng.random(shape)) + + # If alphas are very close to 0 this makes the problem very difficult due to large + # values of ``f``. + alphas *= 100 + + return (alphas,) + + +def f_modified_gaussian(x_arr, n, xp): + r""" + .. math:: + + f(x, y, z, w) = x^n \sqrt{y} \exp(-y-z^2-w^2) + """ + x, y, z, w = x_arr[:, 0], x_arr[:, 1], x_arr[:, 2], x_arr[:, 3] + res = (x ** n[:, None]) * xp.sqrt(y) * xp.exp(-y-z**2-w**2) + + return res.T + + +def f_modified_gaussian_exact(a, b, n, xp): + # Exact only for the limits + # a = (0, 0, -oo, -oo) + # b = (1, oo, oo, oo) + # but defined here as a function to match the format of the other integrands. + return 1/(2 + 2*n) * math.pi ** (3/2) + + +def f_with_problematic_points(x_arr, points, xp): + """ + This emulates a function with a list of singularities given by `points`. + + If no `x_arr` are one of the `points`, then this function returns 1. + """ + + for point in points: + if xp.any(x_arr == point): + raise ValueError("called with a problematic point") + + return xp.ones(x_arr.shape[0]) + + +@array_api_compatible +class TestCubature: + """ + Tests related to the interface of `cubature`. 
+ """ + + @pytest.mark.parametrize("rule_str", [ + "gauss-kronrod", + "genz-malik", + "gk21", + "gk15", + ]) + def test_pass_str(self, rule_str, xp): + n = xp.arange(5, dtype=xp.float64) + a = xp.asarray([0, 0], dtype=xp.float64) + b = xp.asarray([2, 2], dtype=xp.float64) + + res = cubature(basic_nd_integrand, a, b, rule=rule_str, args=(n, xp)) + + xp_assert_close( + res.estimate, + basic_nd_integrand_exact(n, xp), + rtol=1e-8, + atol=0, + ) + + @skip_xp_backends(np_only=True, + reason='array-likes only supported for NumPy backend') + def test_pass_array_like_not_array(self, xp): + n = np_compat.arange(5, dtype=np_compat.float64) + a = [0] + b = [2] + + res = cubature( + basic_1d_integrand, + a, + b, + args=(n, xp) + ) + + xp_assert_close( + res.estimate, + basic_1d_integrand_exact(n, xp), + rtol=1e-8, + atol=0, + ) + + def test_stops_after_max_subdivisions(self, xp): + a = xp.asarray([0]) + b = xp.asarray([1]) + rule = BadErrorRule() + + res = cubature( + basic_1d_integrand, # Any function would suffice + a, + b, + rule=rule, + max_subdivisions=10, + args=(xp.arange(5, dtype=xp.float64), xp), + ) + + assert res.subdivisions == 10 + assert res.status == "not_converged" + + def test_a_and_b_must_be_1d(self, xp): + a = xp.asarray([[0]], dtype=xp.float64) + b = xp.asarray([[1]], dtype=xp.float64) + + with pytest.raises(Exception, match="`a` and `b` must be 1D arrays"): + cubature(basic_1d_integrand, a, b, args=(xp,)) + + def test_a_and_b_must_be_nonempty(self, xp): + a = xp.asarray([]) + b = xp.asarray([]) + + with pytest.raises(Exception, match="`a` and `b` must be nonempty"): + cubature(basic_1d_integrand, a, b, args=(xp,)) + + def test_zero_width_limits(self, xp): + n = xp.arange(5, dtype=xp.float64) + + a = xp.asarray([0], dtype=xp.float64) + b = xp.asarray([0], dtype=xp.float64) + + res = cubature( + basic_1d_integrand, + a, + b, + args=(n, xp), + ) + + xp_assert_close( + res.estimate, + xp.asarray([[0], [0], [0], [0], [0]], dtype=xp.float64), + rtol=1e-8, + 
atol=0, + ) + + def test_limits_other_way_around(self, xp): + n = xp.arange(5, dtype=xp.float64) + + a = xp.asarray([2], dtype=xp.float64) + b = xp.asarray([0], dtype=xp.float64) + + res = cubature( + basic_1d_integrand, + a, + b, + args=(n, xp), + ) + + xp_assert_close( + res.estimate, + -basic_1d_integrand_exact(n, xp), + rtol=1e-8, + atol=0, + ) + + def test_result_dtype_promoted_correctly(self, xp): + result_dtype = cubature( + basic_1d_integrand, + xp.asarray([0], dtype=xp.float64), + xp.asarray([1], dtype=xp.float64), + points=[], + args=(xp.asarray([1], dtype=xp.float64), xp), + ).estimate.dtype + + assert result_dtype == xp.float64 + + result_dtype = cubature( + basic_1d_integrand, + xp.asarray([0], dtype=xp.float32), + xp.asarray([1], dtype=xp.float32), + points=[], + args=(xp.asarray([1], dtype=xp.float32), xp), + ).estimate.dtype + + assert result_dtype == xp.float32 + + result_dtype = cubature( + basic_1d_integrand, + xp.asarray([0], dtype=xp.float32), + xp.asarray([1], dtype=xp.float64), + points=[], + args=(xp.asarray([1], dtype=xp.float32), xp), + ).estimate.dtype + + assert result_dtype == xp.float64 + + +@pytest.mark.parametrize("rtol", [1e-4]) +@pytest.mark.parametrize("atol", [1e-5]) +@pytest.mark.parametrize("rule", [ + "gk15", + "gk21", + "genz-malik", +]) +@array_api_compatible +class TestCubatureProblems: + """ + Tests that `cubature` gives the correct answer. 
+ """ + + @pytest.mark.parametrize("problem", [ + # -- f1 -- + ( + # Function to integrate, like `f(x, *args)` + genz_malik_1980_f_1, + + # Exact solution, like `exact(a, b, *args)` + genz_malik_1980_f_1_exact, + + # Coordinates of `a` + [0], + + # Coordinates of `b` + [10], + + # Arguments to pass to `f` and `exact` + ( + 1/4, + [5], + ) + ), + ( + genz_malik_1980_f_1, + genz_malik_1980_f_1_exact, + [0, 0], + [1, 1], + ( + 1/4, + [2, 4], + ), + ), + ( + genz_malik_1980_f_1, + genz_malik_1980_f_1_exact, + [0, 0], + [5, 5], + ( + 1/2, + [2, 4], + ) + ), + ( + genz_malik_1980_f_1, + genz_malik_1980_f_1_exact, + [0, 0, 0], + [5, 5, 5], + ( + 1/2, + [1, 1, 1], + ) + ), + + # -- f2 -- + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + [-1], + [1], + ( + [5], + [4], + ) + ), + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + + [0, 0], + [10, 50], + ( + [-3, 3], + [-2, 2], + ), + ), + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + [0, 0, 0], + [1, 1, 1], + ( + [1, 1, 1], + [1, 1, 1], + ) + ), + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + [0, 0, 0], + [1, 1, 1], + ( + [2, 3, 4], + [2, 3, 4], + ) + ), + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + [-1, -1, -1], + [1, 1, 1], + ( + [1, 1, 1], + [2, 2, 2], + ) + ), + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + [-1, -1, -1, -1], + [1, 1, 1, 1], + ( + [1, 1, 1, 1], + [1, 1, 1, 1], + ) + ), + + # -- f3 -- + ( + genz_malik_1980_f_3, + genz_malik_1980_f_3_exact, + [-1], + [1], + ( + [1/2], + ), + ), + ( + genz_malik_1980_f_3, + genz_malik_1980_f_3_exact, + [0, -1], + [1, 1], + ( + [5, 5], + ), + ), + ( + genz_malik_1980_f_3, + genz_malik_1980_f_3_exact, + [-1, -1, -1], + [1, 1, 1], + ( + [1, 1, 1], + ), + ), + + # -- f4 -- + ( + genz_malik_1980_f_4, + genz_malik_1980_f_4_exact, + [0], + [2], + ( + [1], + ), + ), + ( + genz_malik_1980_f_4, + genz_malik_1980_f_4_exact, + [0, 0], + [2, 1], + ([1, 1],), + ), + ( + genz_malik_1980_f_4, + genz_malik_1980_f_4_exact, + [0, 0, 0], + 
[1, 1, 1], + ([1, 1, 1],), + ), + + # -- f5 -- + ( + genz_malik_1980_f_5, + genz_malik_1980_f_5_exact, + [-1], + [1], + ( + [-2], + [2], + ), + ), + ( + genz_malik_1980_f_5, + genz_malik_1980_f_5_exact, + [-1, -1], + [1, 1], + ( + [2, 3], + [4, 5], + ), + ), + ( + genz_malik_1980_f_5, + genz_malik_1980_f_5_exact, + [-1, -1], + [1, 1], + ( + [-1, 1], + [0, 0], + ), + ), + ( + genz_malik_1980_f_5, + genz_malik_1980_f_5_exact, + [-1, -1, -1], + [1, 1, 1], + ( + [1, 1, 1], + [1, 1, 1], + ), + ), + ]) + def test_scalar_output(self, problem, rule, rtol, atol, xp): + f, exact, a, b, args = problem + + a = xp.asarray(a, dtype=xp.float64) + b = xp.asarray(b, dtype=xp.float64) + args = tuple(xp.asarray(arg, dtype=xp.float64) for arg in args) + + ndim = xp_size(a) + + if rule == "genz-malik" and ndim < 2: + pytest.skip("Genz-Malik cubature does not support 1D integrals") + + res = cubature( + f, + a, + b, + rule=rule, + rtol=rtol, + atol=atol, + args=(*args, xp), + ) + + assert res.status == "converged" + + est = res.estimate + exact_sol = exact(a, b, *args, xp) + + xp_assert_close( + est, + exact_sol, + rtol=rtol, + atol=atol, + err_msg=f"estimate_error={res.error}, subdivisions={res.subdivisions}", + ) + + @pytest.mark.parametrize("problem", [ + ( + # Function to integrate, like `f(x, *args)` + genz_malik_1980_f_1, + + # Exact solution, like `exact(a, b, *args)` + genz_malik_1980_f_1_exact, + + # Function that generates random args of a certain shape. 
+ genz_malik_1980_f_1_random_args, + ), + ( + genz_malik_1980_f_2, + genz_malik_1980_f_2_exact, + genz_malik_1980_f_2_random_args, + ), + ( + genz_malik_1980_f_3, + genz_malik_1980_f_3_exact, + genz_malik_1980_f_3_random_args + ), + ( + genz_malik_1980_f_4, + genz_malik_1980_f_4_exact, + genz_malik_1980_f_4_random_args + ), + ( + genz_malik_1980_f_5, + genz_malik_1980_f_5_exact, + genz_malik_1980_f_5_random_args, + ), + ]) + @pytest.mark.parametrize("shape", [ + (2,), + (3,), + (4,), + (1, 2), + (1, 3), + (1, 4), + (3, 2), + (3, 4, 2), + (2, 1, 3), + ]) + def test_array_output(self, problem, rule, shape, rtol, atol, xp): + rng = np_compat.random.default_rng(1) + ndim = shape[-1] + + if rule == "genz-malik" and ndim < 2: + pytest.skip("Genz-Malik cubature does not support 1D integrals") + + if rule == "genz-malik" and ndim >= 5: + pytest.mark.slow("Gauss-Kronrod is slow in >= 5 dim") + + f, exact, random_args = problem + args = random_args(rng, shape, xp) + + a = xp.asarray([0] * ndim, dtype=xp.float64) + b = xp.asarray([1] * ndim, dtype=xp.float64) + + res = cubature( + f, + a, + b, + rule=rule, + rtol=rtol, + atol=atol, + args=(*args, xp), + ) + + est = res.estimate + exact_sol = exact(a, b, *args, xp) + + xp_assert_close( + est, + exact_sol, + rtol=rtol, + atol=atol, + err_msg=f"estimate_error={res.error}, subdivisions={res.subdivisions}", + ) + + err_msg = (f"estimate_error={res.error}, " + f"subdivisions= {res.subdivisions}, " + f"true_error={xp.abs(res.estimate - exact_sol)}") + assert res.status == "converged", err_msg + + assert res.estimate.shape == shape[:-1] + + @pytest.mark.parametrize("problem", [ + ( + # Function to integrate + lambda x, xp: x, + + # Exact value + [50.0], + + # Coordinates of `a` + [0], + + # Coordinates of `b` + [10], + + # Points by which to split up the initial region + None, + ), + ( + lambda x, xp: xp.sin(x)/x, + [2.551496047169878], # si(1) + si(2), + [-1], + [2], + [ + [0.0], + ], + ), + ( + lambda x, xp: xp.ones((x.shape[0], 
1)), + [1.0], + [0, 0, 0], + [1, 1, 1], + [ + [0.5, 0.5, 0.5], + ], + ), + ( + lambda x, xp: xp.ones((x.shape[0], 1)), + [1.0], + [0, 0, 0], + [1, 1, 1], + [ + [0.25, 0.25, 0.25], + [0.5, 0.5, 0.5], + ], + ), + ( + lambda x, xp: xp.ones((x.shape[0], 1)), + [1.0], + [0, 0, 0], + [1, 1, 1], + [ + [0.1, 0.25, 0.5], + [0.25, 0.25, 0.25], + [0.5, 0.5, 0.5], + ], + ) + ]) + def test_break_points(self, problem, rule, rtol, atol, xp): + f, exact, a, b, points = problem + + a = xp.asarray(a, dtype=xp.float64) + b = xp.asarray(b, dtype=xp.float64) + exact = xp.asarray(exact, dtype=xp.float64) + + if points is not None: + points = [xp.asarray(point, dtype=xp.float64) for point in points] + + ndim = xp_size(a) + + if rule == "genz-malik" and ndim < 2: + pytest.skip("Genz-Malik cubature does not support 1D integrals") + + if rule == "genz-malik" and ndim >= 5: + pytest.mark.slow("Gauss-Kronrod is slow in >= 5 dim") + + res = cubature( + f, + a, + b, + rule=rule, + rtol=rtol, + atol=atol, + points=points, + args=(xp,), + ) + + xp_assert_close( + res.estimate, + exact, + rtol=rtol, + atol=atol, + err_msg=f"estimate_error={res.error}, subdivisions={res.subdivisions}", + check_dtype=False, + ) + + err_msg = (f"estimate_error={res.error}, " + f"subdivisions= {res.subdivisions}, " + f"true_error={xp.abs(res.estimate - exact)}") + assert res.status == "converged", err_msg + + @skip_xp_backends( + "jax.numpy", + reasons=["transforms make use of indexing assignment"], + ) + @pytest.mark.parametrize("problem", [ + ( + # Function to integrate + f_gaussian, + + # Exact solution + f_gaussian_exact, + + # Arguments passed to f + f_gaussian_random_args, + (1, 1), + + # Limits, have to match the shape of the arguments + [-math.inf], # a + [math.inf], # b + ), + ( + f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (2, 2), + [-math.inf, -math.inf], + [math.inf, math.inf], + ), + ( + f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (1, 1), + [0], + [math.inf], + ), + ( + 
f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (1, 1), + [-math.inf], + [0], + ), + ( + f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (2, 2), + [0, 0], + [math.inf, math.inf], + ), + ( + f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (2, 2), + [0, -math.inf], + [math.inf, math.inf], + ), + ( + f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (1, 4), + [0, 0, -math.inf, -math.inf], + [math.inf, math.inf, math.inf, math.inf], + ), + ( + f_gaussian, + f_gaussian_exact, + f_gaussian_random_args, + (1, 4), + [-math.inf, -math.inf, -math.inf, -math.inf], + [0, 0, math.inf, math.inf], + ), + ( + lambda x, xp: 1/xp.prod(x, axis=-1)**2, + + # Exact only for the below limits, not for general `a` and `b`. + lambda a, b, xp: xp.asarray(1/6, dtype=xp.float64), + + # Arguments + lambda rng, shape, xp: tuple(), + tuple(), + + [1, -math.inf, 3], + [math.inf, -2, math.inf], + ), + + # This particular problem can be slow + pytest.param( + ( + # f(x, y, z, w) = x^n * sqrt(y) * exp(-y-z**2-w**2) for n in [0,1,2,3] + f_modified_gaussian, + + # This exact solution is for the below limits, not in general + f_modified_gaussian_exact, + + # Constant arguments + lambda rng, shape, xp: (xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64),), + tuple(), + + [0, 0, -math.inf, -math.inf], + [1, math.inf, math.inf, math.inf] + ), + + marks=pytest.mark.xslow, + ), + ]) + def test_infinite_limits(self, problem, rule, rtol, atol, xp): + rng = np_compat.random.default_rng(1) + f, exact, random_args_func, random_args_shape, a, b = problem + + a = xp.asarray(a, dtype=xp.float64) + b = xp.asarray(b, dtype=xp.float64) + args = random_args_func(rng, random_args_shape, xp) + + ndim = xp_size(a) + + if rule == "genz-malik" and ndim < 2: + pytest.skip("Genz-Malik cubature does not support 1D integrals") + + if rule == "genz-malik" and ndim >= 4: + pytest.mark.slow("Genz-Malik is slow in >= 5 dim") + + if rule == "genz-malik" and ndim >= 4 and 
is_array_api_strict(xp): + pytest.mark.xslow("Genz-Malik very slow for array_api_strict in >= 4 dim") + + res = cubature( + f, + a, + b, + rule=rule, + rtol=rtol, + atol=atol, + args=(*args, xp), + ) + + assert res.status == "converged" + + xp_assert_close( + res.estimate, + exact(a, b, *args, xp), + rtol=rtol, + atol=atol, + err_msg=f"error_estimate={res.error}, subdivisions={res.subdivisions}", + check_0d=False, + ) + + @skip_xp_backends( + "jax.numpy", + reasons=["transforms make use of indexing assignment"], + ) + @pytest.mark.parametrize("problem", [ + ( + # Function to integrate + lambda x, xp: (xp.sin(x) / x)**8, + + # Exact value + [151/315 * math.pi], + + # Limits + [-math.inf], + [math.inf], + + # Breakpoints + [[0]], + + ), + ( + # Function to integrate + lambda x, xp: (xp.sin(x[:, 0]) / x[:, 0])**8, + + # Exact value + 151/315 * math.pi, + + # Limits + [-math.inf, 0], + [math.inf, 1], + + # Breakpoints + [[0, 0.5]], + + ) + ]) + def test_infinite_limits_and_break_points(self, problem, rule, rtol, atol, xp): + f, exact, a, b, points = problem + + a = xp.asarray(a, dtype=xp.float64) + b = xp.asarray(b, dtype=xp.float64) + exact = xp.asarray(exact, dtype=xp.float64) + + ndim = xp_size(a) + + if rule == "genz-malik" and ndim < 2: + pytest.skip("Genz-Malik cubature does not support 1D integrals") + + if points is not None: + points = [xp.asarray(point, dtype=xp.float64) for point in points] + + res = cubature( + f, + a, + b, + rule=rule, + rtol=rtol, + atol=atol, + points=points, + args=(xp,), + ) + + assert res.status == "converged" + + xp_assert_close( + res.estimate, + exact, + rtol=rtol, + atol=atol, + err_msg=f"error_estimate={res.error}, subdivisions={res.subdivisions}", + check_0d=False, + ) + + +@array_api_compatible +class TestRules: + """ + Tests related to the general Rule interface (currently private). 
+ """ + + @pytest.mark.parametrize("problem", [ + ( + # 2D problem, 1D rule + [0, 0], + [1, 1], + GaussKronrodQuadrature, + (21,), + ), + ( + # 1D problem, 2D rule + [0], + [1], + GenzMalikCubature, + (2,), + ) + ]) + def test_incompatible_dimension_raises_error(self, problem, xp): + a, b, quadrature, quadrature_args = problem + rule = quadrature(*quadrature_args, xp=xp) + + a = xp.asarray(a, dtype=xp.float64) + b = xp.asarray(b, dtype=xp.float64) + + with pytest.raises(Exception, match="incompatible dimension"): + rule.estimate(basic_1d_integrand, a, b, args=(xp,)) + + def test_estimate_with_base_classes_raise_error(self, xp): + a = xp.asarray([0]) + b = xp.asarray([1]) + + for base_class in [Rule(), FixedRule()]: + with pytest.raises(Exception): + base_class.estimate(basic_1d_integrand, a, b, args=(xp,)) + + +@array_api_compatible +class TestRulesQuadrature: + """ + Tests underlying quadrature rules (ndim == 1). + """ + + @pytest.mark.parametrize(("rule", "rule_args"), [ + (GaussLegendreQuadrature, (3,)), + (GaussLegendreQuadrature, (5,)), + (GaussLegendreQuadrature, (10,)), + (GaussKronrodQuadrature, (15,)), + (GaussKronrodQuadrature, (21,)), + ]) + def test_base_1d_quadratures_simple(self, rule, rule_args, xp): + quadrature = rule(*rule_args, xp=xp) + + n = xp.arange(5, dtype=xp.float64) + + def f(x): + x_reshaped = xp.reshape(x, (-1, 1, 1)) + n_reshaped = xp.reshape(n, (1, -1, 1)) + + return x_reshaped**n_reshaped + + a = xp.asarray([0], dtype=xp.float64) + b = xp.asarray([2], dtype=xp.float64) + + exact = xp.reshape(2**(n+1)/(n+1), (-1, 1)) + estimate = quadrature.estimate(f, a, b) + + xp_assert_close( + estimate, + exact, + rtol=1e-8, + atol=0, + ) + + @pytest.mark.parametrize(("rule_pair", "rule_pair_args"), [ + ((GaussLegendreQuadrature, GaussLegendreQuadrature), (10, 5)), + ]) + def test_base_1d_quadratures_error_from_difference(self, rule_pair, rule_pair_args, + xp): + n = xp.arange(5, dtype=xp.float64) + a = xp.asarray([0], dtype=xp.float64) + b = 
xp.asarray([2], dtype=xp.float64) + + higher = rule_pair[0](rule_pair_args[0], xp=xp) + lower = rule_pair[1](rule_pair_args[1], xp=xp) + + rule = NestedFixedRule(higher, lower) + res = cubature( + basic_1d_integrand, + a, b, + rule=rule, + rtol=1e-8, + args=(n, xp), + ) + + xp_assert_close( + res.estimate, + basic_1d_integrand_exact(n, xp), + rtol=1e-8, + atol=0, + ) + + @pytest.mark.parametrize("quadrature", [ + GaussLegendreQuadrature + ]) + def test_one_point_fixed_quad_impossible(self, quadrature, xp): + with pytest.raises(Exception): + quadrature(1, xp=xp) + + +@array_api_compatible +class TestRulesCubature: + """ + Tests underlying cubature rules (ndim >= 2). + """ + + @pytest.mark.parametrize("ndim", range(2, 11)) + def test_genz_malik_func_evaluations(self, ndim, xp): + """ + Tests that the number of function evaluations required for Genz-Malik cubature + matches the number in Genz and Malik 1980. + """ + + nodes, _ = GenzMalikCubature(ndim, xp=xp).nodes_and_weights + + assert nodes.shape[0] == (2**ndim) + 2*ndim**2 + 2*ndim + 1 + + def test_genz_malik_1d_raises_error(self, xp): + with pytest.raises(Exception, match="only defined for ndim >= 2"): + GenzMalikCubature(1, xp=xp) + + +@array_api_compatible +@skip_xp_backends( + "jax.numpy", + reasons=["transforms make use of indexing assignment"], +) +class TestTransformations: + @pytest.mark.parametrize(("a", "b", "points"), [ + ( + [0, 1, -math.inf], + [1, math.inf, math.inf], + [ + [1, 1, 1], + [0.5, 10, 10], + ] + ) + ]) + def test_infinite_limits_maintains_points(self, a, b, points, xp): + """ + Test that break points are correctly mapped under the _InfiniteLimitsTransform + transformation. 
+ """ + + xp_compat = array_namespace(xp.empty(0)) + points = [xp.asarray(p, dtype=xp.float64) for p in points] + + f_transformed = _InfiniteLimitsTransform( + # Bind `points` and `xp` argument in f + lambda x: f_with_problematic_points(x, points, xp_compat), + xp.asarray(a, dtype=xp_compat.float64), + xp.asarray(b, dtype=xp_compat.float64), + xp=xp_compat, + ) + + for point in points: + transformed_point = f_transformed.inv(xp_compat.reshape(point, (1, -1))) + + with pytest.raises(Exception, match="called with a problematic point"): + f_transformed(transformed_point) + + +class BadErrorRule(Rule): + """ + A rule with fake high error so that cubature will keep on subdividing. + """ + + def estimate(self, f, a, b, args=()): + xp = array_namespace(a, b) + underlying = GaussLegendreQuadrature(10, xp=xp) + + return underlying.estimate(f, a, b, args) + + def estimate_error(self, f, a, b, args=()): + xp = array_namespace(a, b) + return xp.asarray(1e6, dtype=xp.float64) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py new file mode 100644 index 0000000000000000000000000000000000000000..44bfecdaac0f00b413538510c61dd1317a076261 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_integrate.py @@ -0,0 +1,840 @@ +# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers +""" +Tests for numerical integration. 
+""" +import numpy as np +from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, + allclose) + +from numpy.testing import ( + assert_, assert_array_almost_equal, + assert_allclose, assert_array_equal, assert_equal, assert_warns) +import pytest +from pytest import raises as assert_raises +from scipy.integrate import odeint, ode, complex_ode + +#------------------------------------------------------------------------------ +# Test ODE integrators +#------------------------------------------------------------------------------ + + +class TestOdeint: + # Check integrate.odeint + + def _do_problem(self, problem): + t = arange(0.0, problem.stop_t, 0.05) + + # Basic case + z, infodict = odeint(problem.f, problem.z0, t, full_output=True) + assert_(problem.verify(z, t)) + + # Use tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + if hasattr(problem, 'jac'): + # Use Dfun + z, infodict = odeint(problem.f, problem.z0, t, Dfun=problem.jac, + full_output=True) + assert_(problem.verify(z, t)) + + # Use Dfun and tfirst=True + z, infodict = odeint(lambda t, y: problem.f(y, t), problem.z0, t, + Dfun=lambda t, y: problem.jac(y, t), + full_output=True, tfirst=True) + assert_(problem.verify(z, t)) + + def test_odeint(self): + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem) + + +class TestODEClass: + + ode_class = None # Set in subclass. 
+ + def _do_problem(self, problem, integrator, method='adams'): + + # ode has callback arguments in different order than odeint + def f(t, z): + return problem.f(z, t) + jac = None + if hasattr(problem, 'jac'): + def jac(t, z): + return problem.jac(z, t) + + integrator_params = {} + if problem.lband is not None or problem.uband is not None: + integrator_params['uband'] = problem.uband + integrator_params['lband'] = problem.lband + + ig = self.ode_class(f, jac) + ig.set_integrator(integrator, + atol=problem.atol/10, + rtol=problem.rtol/10, + method=method, + **integrator_params) + + ig.set_initial_value(problem.z0, t=0.0) + z = ig.integrate(problem.stop_t) + + assert_array_equal(z, ig.y) + assert_(ig.successful(), (problem, method)) + assert_(ig.get_return_code() > 0, (problem, method)) + assert_(problem.verify(array([z]), problem.stop_t), (problem, method)) + + +class TestOde(TestODEClass): + + ode_class = ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + self._do_problem(problem, 'vode', 'bdf') + + def test_zvode(self): + # Check the zvode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'zvode', 'adams') + self._do_problem(problem, 'zvode', 'bdf') + + def test_lsoda(self): + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: + continue + if problem.stiff: + 
continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + @pytest.mark.thread_unsafe + def test_concurrent_fail(self): + for sol in ('vode', 'zvode', 'lsoda'): + def f(t, y): + return 1.0 + + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_raises(RuntimeError, r.integrate, r.t + 0.1) + + def test_concurrent_ok(self, num_parallel_threads): + def f(t, y): + return 1.0 + + for k in range(3): + for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'): + if sol in {'vode', 'zvode', 'lsoda'} and num_parallel_threads > 1: + continue + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.1) + assert_allclose(r2.y, 0.2) + + for sol in ('dopri5', 'dop853'): + r = ode(f).set_integrator(sol) + r.set_initial_value(0, 0) + + r2 = ode(f).set_integrator(sol) + r2.set_initial_value(0, 0) + + r.integrate(r.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + r.integrate(r.t + 0.1) + r2.integrate(r2.t + 0.1) + + assert_allclose(r.y, 0.3) + assert_allclose(r2.y, 0.2) + + +class TestComplexOde(TestODEClass): + + ode_class = complex_ode + + def test_vode(self): + # Check the vode solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + else: + self._do_problem(problem, 'vode', 'bdf') + + def test_lsoda(self): + + # Check the lsoda solver + for problem_cls in PROBLEMS: + problem = problem_cls() + self._do_problem(problem, 'lsoda') + + def test_dopri5(self): + # Check the dopri5 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dopri5') + + 
def test_dop853(self): + # Check the dop853 solver + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.stiff: + continue + if hasattr(problem, 'jac'): + continue + self._do_problem(problem, 'dop853') + + +class TestSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_after_initial_test(self, integrator): + # Check if solout works even if it is set after the initial value. 
+ ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_initial_value(y0, t0) + ig.set_solout(solout) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout_after_initial(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_after_initial_test(integrator) + + def _run_solout_break_test(self, integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 10.0 + y0 = [1.0, 2.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [y[0] + y[1], -y[1]**2] + + ig = ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +class TestComplexSolout: + # Check integrate.ode correctly handles solout for dopri5 and dop853 + def _run_solout_test(self, integrator): + # Check correct usage of solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_equal(ts[-1], tend) + + def test_solout(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_test(integrator) + + def _run_solout_break_test(self, 
integrator): + # Check correct usage of stopping via solout + ts = [] + ys = [] + t0 = 0.0 + tend = 20.0 + y0 = [0.0] + + def solout(t, y): + ts.append(t) + ys.append(y.copy()) + if t > tend/2.0: + return -1 + + def rhs(t, y): + return [1.0/(t - 10.0 - 1j)] + + ig = complex_ode(rhs).set_integrator(integrator) + ig.set_solout(solout) + ig.set_initial_value(y0, t0) + ret = ig.integrate(tend) + assert_array_equal(ys[0], y0) + assert_array_equal(ys[-1], ret) + assert_equal(ts[0], t0) + assert_(ts[-1] > tend/2.0) + assert_(ts[-1] < tend) + + def test_solout_break(self): + for integrator in ('dopri5', 'dop853'): + self._run_solout_break_test(integrator) + + +#------------------------------------------------------------------------------ +# Test problems +#------------------------------------------------------------------------------ + + +class ODE: + """ + ODE problem + """ + stiff = False + cmplx = False + stop_t = 1 + z0 = [] + + lband = None + uband = None + + atol = 1e-6 + rtol = 1e-5 + + +class SimpleOscillator(ODE): + r""" + Free vibration of a simple oscillator:: + m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 + Solution:: + u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) + """ + stop_t = 1 + 0.09 + z0 = array([1.0, 0.1], float) + + k = 4.0 + m = 1.0 + + def f(self, z, t): + tmp = zeros((2, 2), float) + tmp[0, 1] = 1.0 + tmp[1, 0] = -self.k / self.m + return dot(tmp, z) + + def verify(self, zs, t): + omega = sqrt(self.k / self.m) + u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega + return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol) + + +class ComplexExp(ODE): + r"""The equation :lm:`\dot u = i u`""" + stop_t = 1.23*pi + z0 = exp([1j, 2j, 3j, 4j, 5j]) + cmplx = True + + def f(self, z, t): + return 1j*z + + def jac(self, z, t): + return 1j*eye(5) + + def verify(self, zs, t): + u = self.z0 * exp(1j*t) + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +class Pi(ODE): + r"""Integrate 1/(t + 1j) from t=-10 to 
t=10""" + stop_t = 20 + z0 = [0] + cmplx = True + + def f(self, z, t): + return array([1./(t - 10 + 1j)]) + + def verify(self, zs, t): + u = -2j * np.arctan(10) + return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol) + + +class CoupledDecay(ODE): + r""" + 3 coupled decays suited for banded treatment + (banded mode makes it necessary when N>>3) + """ + + stiff = True + stop_t = 0.5 + z0 = [5.0, 7.0, 13.0] + lband = 1 + uband = 0 + + lmbd = [0.17, 0.23, 0.29] # fictitious decay constants + + def f(self, z, t): + lmbd = self.lmbd + return np.array([-lmbd[0]*z[0], + -lmbd[1]*z[1] + lmbd[0]*z[0], + -lmbd[2]*z[2] + lmbd[1]*z[1]]) + + def jac(self, z, t): + # The full Jacobian is + # + # [-lmbd[0] 0 0 ] + # [ lmbd[0] -lmbd[1] 0 ] + # [ 0 lmbd[1] -lmbd[2]] + # + # The lower and upper bandwidths are lband=1 and uband=0, resp. + # The representation of this array in packed format is + # + # [-lmbd[0] -lmbd[1] -lmbd[2]] + # [ lmbd[0] lmbd[1] 0 ] + + lmbd = self.lmbd + j = np.zeros((self.lband + self.uband + 1, 3), order='F') + + def set_j(ri, ci, val): + j[self.uband + ri - ci, ci] = val + set_j(0, 0, -lmbd[0]) + set_j(1, 0, lmbd[0]) + set_j(1, 1, -lmbd[1]) + set_j(2, 1, lmbd[1]) + set_j(2, 2, -lmbd[2]) + return j + + def verify(self, zs, t): + # Formulae derived by hand + lmbd = np.array(self.lmbd) + d10 = lmbd[1] - lmbd[0] + d21 = lmbd[2] - lmbd[1] + d20 = lmbd[2] - lmbd[0] + e0 = np.exp(-lmbd[0] * t) + e1 = np.exp(-lmbd[1] * t) + e2 = np.exp(-lmbd[2] * t) + u = np.vstack(( + self.z0[0] * e0, + self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1), + self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) + + lmbd[1] * lmbd[0] * self.z0[0] / d10 * + (1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose() + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + + +PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay] + +#------------------------------------------------------------------------------ + + +def f(t, x): + dxdt = [x[1], -x[0]] + return dxdt 
+ + +def jac(t, x): + j = array([[0.0, 1.0], + [-1.0, 0.0]]) + return j + + +def f1(t, x, omega): + dxdt = [omega*x[1], -omega*x[0]] + return dxdt + + +def jac1(t, x, omega): + j = array([[0.0, omega], + [-omega, 0.0]]) + return j + + +def f2(t, x, omega1, omega2): + dxdt = [omega1*x[1], -omega2*x[0]] + return dxdt + + +def jac2(t, x, omega1, omega2): + j = array([[0.0, omega1], + [-omega2, 0.0]]) + return j + + +def fv(t, x, omega): + dxdt = [omega[0]*x[1], -omega[1]*x[0]] + return dxdt + + +def jacv(t, x, omega): + j = array([[0.0, omega[0]], + [-omega[1], 0.0]]) + return j + + +class ODECheckParameterUse: + """Call an ode-class solver with several cases of parameter use.""" + + # solver_name must be set before tests can be run with this class. + + # Set these in subclasses. + solver_name = '' + solver_uses_jac = False + + def _get_solver(self, f, jac): + solver = ode(f, jac) + if self.solver_uses_jac: + solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7, + with_jacobian=self.solver_uses_jac) + else: + # XXX Shouldn't set_integrator *always* accept the keyword arg + # 'with_jacobian', and perhaps raise an exception if it is set + # to True if the solver can't actually use it? 
+ solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7) + return solver + + def _check_solver(self, solver): + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + solver.integrate(pi) + assert_array_almost_equal(solver.y, [-1.0, 0.0]) + + def test_no_params(self): + solver = self._get_solver(f, jac) + self._check_solver(solver) + + def test_one_scalar_param(self): + solver = self._get_solver(f1, jac1) + omega = 1.0 + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + def test_two_scalar_params(self): + solver = self._get_solver(f2, jac2) + omega1 = 1.0 + omega2 = 1.0 + solver.set_f_params(omega1, omega2) + if self.solver_uses_jac: + solver.set_jac_params(omega1, omega2) + self._check_solver(solver) + + def test_vector_param(self): + solver = self._get_solver(fv, jacv) + omega = [1.0, 1.0] + solver.set_f_params(omega) + if self.solver_uses_jac: + solver.set_jac_params(omega) + self._check_solver(solver) + + @pytest.mark.thread_unsafe + def test_warns_on_failure(self): + # Set nsteps small to ensure failure + solver = self._get_solver(f, jac) + solver.set_integrator(self.solver_name, nsteps=1) + ic = [1.0, 0.0] + solver.set_initial_value(ic, 0.0) + assert_warns(UserWarning, solver.integrate, pi) + + +class TestDOPRI5CheckParameterUse(ODECheckParameterUse): + solver_name = 'dopri5' + solver_uses_jac = False + + +class TestDOP853CheckParameterUse(ODECheckParameterUse): + solver_name = 'dop853' + solver_uses_jac = False + + +class TestVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'vode' + solver_uses_jac = True + + +class TestZVODECheckParameterUse(ODECheckParameterUse): + solver_name = 'zvode' + solver_uses_jac = True + + +class TestLSODACheckParameterUse(ODECheckParameterUse): + solver_name = 'lsoda' + solver_uses_jac = True + + +def test_odeint_trivial_time(): + # Test that odeint succeeds when given a single time point + # and full_output=True. 
This is a regression test for gh-4282. + y0 = 1 + t = [0] + y, info = odeint(lambda y, t: -y, y0, t, full_output=True) + assert_array_equal(y, np.array([[y0]])) + + +def test_odeint_banded_jacobian(): + # Test the use of the `Dfun`, `ml` and `mu` options of odeint. + + def func(y, t, c): + return c.dot(y) + + def jac(y, t, c): + return c + + def jac_transpose(y, t, c): + return c.T.copy(order='C') + + def bjac_rows(y, t, c): + jac = np.vstack((np.r_[0, np.diag(c, 1)], + np.diag(c), + np.r_[np.diag(c, -1), 0], + np.r_[np.diag(c, -2), 0, 0])) + return jac + + def bjac_cols(y, t, c): + return bjac_rows(y, t, c).T.copy(order='C') + + c = array([[-205, 0.01, 0.00, 0.0], + [0.1, -2.50, 0.02, 0.0], + [1e-3, 0.01, -2.0, 0.01], + [0.00, 0.00, 0.1, -1.0]]) + + y0 = np.ones(4) + t = np.array([0, 5, 10, 100]) + + # Use the full Jacobian. + sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac) + + # Use the transposed full Jacobian, with col_deriv=True. + sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=jac_transpose, col_deriv=True) + + # Use the banded Jacobian. + sol3, info3 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_rows, ml=2, mu=1) + + # Use the transposed banded Jacobian, with col_deriv=True. + sol4, info4 = odeint(func, y0, t, args=(c,), full_output=True, + atol=1e-13, rtol=1e-11, mxstep=10000, + Dfun=bjac_cols, ml=2, mu=1, col_deriv=True) + + assert_allclose(sol1, sol2, err_msg="sol1 != sol2") + assert_allclose(sol1, sol3, atol=1e-12, err_msg="sol1 != sol3") + assert_allclose(sol3, sol4, err_msg="sol3 != sol4") + + # Verify that the number of jacobian evaluations was the same for the + # calls of odeint with a full jacobian and with a banded jacobian. 
This is + # a regression test--there was a bug in the handling of banded jacobians + # that resulted in an incorrect jacobian matrix being passed to the LSODA + # code. That would cause errors or excessive jacobian evaluations. + assert_array_equal(info1['nje'], info2['nje']) + assert_array_equal(info3['nje'], info4['nje']) + + # Test the use of tfirst + sol1ty, info1ty = odeint(lambda t, y, c: func(y, t, c), y0, t, args=(c,), + full_output=True, atol=1e-13, rtol=1e-11, + mxstep=10000, + Dfun=lambda t, y, c: jac(y, t, c), tfirst=True) + # The code should execute the exact same sequence of floating point + # calculations, so these should be exactly equal. We'll be safe and use + # a small tolerance. + assert_allclose(sol1, sol1ty, rtol=1e-12, err_msg="sol1 != sol1ty") + + +def test_odeint_errors(): + def sys1d(x, t): + return -100*x + + def bad1(x, t): + return 1.0/0 + + def bad2(x, t): + return "foo" + + def bad_jac1(x, t): + return 1.0/0 + + def bad_jac2(x, t): + return [["foo"]] + + def sys2d(x, t): + return [-100*x[0], -0.1*x[1]] + + def sys2d_bad_jac(x, t): + return [[1.0/0, 0], [0, -0.1]] + + assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1]) + assert_raises(ValueError, odeint, bad2, 1.0, [0, 1]) + + assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1) + assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2) + + assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1], + Dfun=sys2d_bad_jac) + + +def test_odeint_bad_shapes(): + # Tests of some errors that can occur with odeint. + + def badrhs(x, t): + return [1, -1] + + def sys1(x, t): + return -100*x + + def badjac(x, t): + return [[0, 0, 0]] + + # y0 must be at most 1-d. + bad_y0 = [[0, 0], [0, 0]] + assert_raises(ValueError, odeint, sys1, bad_y0, [0, 1]) + + # t must be at most 1-d. + bad_t = [[0, 1], [2, 3]] + assert_raises(ValueError, odeint, sys1, [10.0], bad_t) + + # y0 is 10, but badrhs(x, t) returns [1, -1]. 
+ assert_raises(RuntimeError, odeint, badrhs, 10, [0, 1]) + + # shape of array returned by badjac(x, t) is not correct. + assert_raises(RuntimeError, odeint, sys1, [10, 10], [0, 1], Dfun=badjac) + + +def test_repeated_t_values(): + """Regression test for gh-8217.""" + + def func(x, t): + return -0.25*x + + t = np.zeros(10) + sol = odeint(func, [1.], t) + assert_array_equal(sol, np.ones((len(t), 1))) + + tau = 4*np.log(2) + t = [0]*9 + [tau, 2*tau, 2*tau, 3*tau] + sol = odeint(func, [1, 2], t, rtol=1e-12, atol=1e-12) + expected_sol = np.array([[1.0, 2.0]]*9 + + [[0.5, 1.0], + [0.25, 0.5], + [0.25, 0.5], + [0.125, 0.25]]) + assert_allclose(sol, expected_sol) + + # Edge case: empty t sequence. + sol = odeint(func, [1.], []) + assert_array_equal(sol, np.array([], dtype=np.float64).reshape((0, 1))) + + # t values are not monotonic. + assert_raises(ValueError, odeint, func, [1.], [0, 1, 0.5, 0]) + assert_raises(ValueError, odeint, func, [1, 2, 3], [0, -1, -2, 3]) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py new file mode 100644 index 0000000000000000000000000000000000000000..7d28ccc93f4444f3f2e0b71da01c573d4f903dbc --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py @@ -0,0 +1,74 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.integrate import odeint +import scipy.integrate._test_odeint_banded as banded5x5 + + +def rhs(y, t): + dydt = np.zeros_like(y) + banded5x5.banded5x5(t, y, dydt) + return dydt + + +def jac(y, t): + n = len(y) + jac = np.zeros((n, n), order='F') + banded5x5.banded5x5_jac(t, y, 1, 1, jac) + return jac + + +def bjac(y, t): + n = len(y) + bjac = np.zeros((4, n), order='F') + banded5x5.banded5x5_bjac(t, y, 1, 1, bjac) + return bjac + + +JACTYPE_FULL = 1 +JACTYPE_BANDED = 4 + + +def check_odeint(jactype): + if jactype == 
JACTYPE_FULL: + ml = None + mu = None + jacobian = jac + elif jactype == JACTYPE_BANDED: + ml = 2 + mu = 1 + jacobian = bjac + else: + raise ValueError(f"invalid jactype: {jactype!r}") + + y0 = np.arange(1.0, 6.0) + # These tolerances must match the tolerances used in banded5x5.f. + rtol = 1e-11 + atol = 1e-13 + dt = 0.125 + nsteps = 64 + t = dt * np.arange(nsteps+1) + + sol, info = odeint(rhs, y0, t, + Dfun=jacobian, ml=ml, mu=mu, + atol=atol, rtol=rtol, full_output=True) + yfinal = sol[-1] + odeint_nst = info['nst'][-1] + odeint_nfe = info['nfe'][-1] + odeint_nje = info['nje'][-1] + + y1 = y0.copy() + # Pure Fortran solution. y1 is modified in-place. + nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype) + + # It is likely that yfinal and y1 are *exactly* the same, but + # we'll be cautious and use assert_allclose. + assert_allclose(yfinal, y1, rtol=1e-12) + assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje)) + + +def test_odeint_full_jac(): + check_odeint(JACTYPE_FULL) + + +def test_odeint_banded_jac(): + check_odeint(JACTYPE_BANDED) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py new file mode 100644 index 0000000000000000000000000000000000000000..e61a69df40f9b5975a6f02f40e6f72e34dbbf297 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_quadpack.py @@ -0,0 +1,680 @@ +import sys +import math +import numpy as np +from numpy import sqrt, cos, sin, arctan, exp, log, pi +from numpy.testing import (assert_, + assert_allclose, assert_array_less, assert_almost_equal) +import pytest + +from scipy.integrate import quad, dblquad, tplquad, nquad +from scipy.special import erf, erfc +from scipy._lib._ccallback import LowLevelCallable + +import ctypes +import ctypes.util +from scipy._lib._ccallback_c import sine_ctypes + +import scipy.integrate._test_multivariate as clib_test + 
+ +def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8): + value, err = value_and_err + assert_allclose(value, tabled_value, atol=err, rtol=0) + if error_tolerance is not None: + assert_array_less(err, error_tolerance) + + +def get_clib_test_routine(name, restype, *argtypes): + ptr = getattr(clib_test, name) + return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes)) + + +class TestCtypesQuad: + def setup_method(self): + if sys.platform == 'win32': + files = ['api-ms-win-crt-math-l1-1-0.dll'] + elif sys.platform == 'darwin': + files = ['libm.dylib'] + else: + files = ['libm.so', 'libm.so.6'] + + for file in files: + try: + self.lib = ctypes.CDLL(file) + break + except OSError: + pass + else: + # This test doesn't work on some Linux platforms (Fedora for + # example) that put an ld script in libm.so - see gh-5370 + pytest.skip("Ctypes can't import libm.so") + + restype = ctypes.c_double + argtypes = (ctypes.c_double,) + for name in ['sin', 'cos', 'tan']: + func = getattr(self.lib, name) + func.restype = restype + func.argtypes = argtypes + + def test_typical(self): + assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0]) + assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0]) + assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0]) + + def test_ctypes_sine(self): + quad(LowLevelCallable(sine_ctypes), 0, 1) + + def test_ctypes_variants(self): + sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double, + ctypes.c_double, ctypes.c_void_p) + + sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double), + ctypes.c_void_p) + + sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double, + ctypes.c_double) + + sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.POINTER(ctypes.c_double)) + + sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double, + ctypes.c_int, ctypes.c_double) + + all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4] + legacy_sigs = [sin_2, sin_4] + 
legacy_only_sigs = [sin_4] + + # LowLevelCallables work for new signatures + for j, func in enumerate(all_sigs): + callback = LowLevelCallable(func) + if func in legacy_only_sigs: + pytest.raises(ValueError, quad, callback, 0, pi) + else: + assert_allclose(quad(callback, 0, pi)[0], 2.0) + + # Plain ctypes items work only for legacy signatures + for j, func in enumerate(legacy_sigs): + if func in legacy_sigs: + assert_allclose(quad(func, 0, pi)[0], 2.0) + else: + pytest.raises(ValueError, quad, func, 0, pi) + + +class TestMultivariateCtypesQuad: + def setup_method(self): + restype = ctypes.c_double + argtypes = (ctypes.c_int, ctypes.c_double) + for name in ['_multivariate_typical', '_multivariate_indefinite', + '_multivariate_sin']: + func = get_clib_test_routine(name, restype, *argtypes) + setattr(self, name, func) + + def test_typical(self): + # 1) Typical function with two extra arguments: + assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)), + 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + assert_quad(quad(self._multivariate_indefinite, 0, np.inf), + 0.577215664901532860606512) + + def test_threadsafety(self): + # Ensure multivariate ctypes are threadsafe + def threadsafety(y): + return y + quad(self._multivariate_sin, 0, 1)[0] + assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602) + + +class TestQuad: + def test_typical(self): + # 1) Typical function with two extra arguments: + def myfunc(x, n, z): # Bessel function integrand + return cos(n*x-z*sin(x))/pi + assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487) + + def test_indefinite(self): + # 2) Infinite integration limits --- Euler's constant + def myfunc(x): # Euler's constant integrand + return -exp(-x)*log(x) + assert_quad(quad(myfunc, 0, np.inf), 0.577215664901532860606512) + + def test_singular(self): + # 3) Singular points in region of integration. 
+ def myfunc(x): + if 0 < x < 2.5: + return sin(x) + elif 2.5 <= x <= 5.0: + return exp(-x) + else: + return 0.0 + + assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]), + 1 - cos(2.5) + exp(-2.5) - exp(-5.0)) + + def test_sine_weighted_finite(self): + # 4) Sine weighted integral (finite limits) + def myfunc(x, a): + return exp(a*(x-1)) + + ome = 2.0**3.4 + assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome), + (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2)) + + def test_sine_weighted_infinite(self): + # 5) Sine weighted integral (infinite limits) + def myfunc(x, a): + return exp(-x*a) + + a = 4.0 + ome = 3.0 + assert_quad(quad(myfunc, 0, np.inf, args=a, weight='sin', wvar=ome), + ome/(a**2 + ome**2)) + + def test_cosine_weighted_infinite(self): + # 6) Cosine weighted integral (negative infinite limits) + def myfunc(x, a): + return exp(x*a) + + a = 2.5 + ome = 2.3 + assert_quad(quad(myfunc, -np.inf, 0, args=a, weight='cos', wvar=ome), + a/(a**2 + ome**2)) + + def test_algebraic_log_weight(self): + # 6) Algebraic-logarithmic weight. 
+ def myfunc(x, a): + return 1/(1+x+2**(-a)) + + a = 1.5 + assert_quad(quad(myfunc, -1, 1, args=a, weight='alg', + wvar=(-0.5, -0.5)), + pi/sqrt((1+2**(-a))**2 - 1)) + + def test_cauchypv_weight(self): + # 7) Cauchy prinicpal value weighting w(x) = 1/(x-c) + def myfunc(x, a): + return 2.0**(-a)/((x-1)**2+4.0**(-a)) + + a = 0.4 + tabledValue = ((2.0**(-0.4)*log(1.5) - + 2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) - + arctan(2.0**(a+2)) - + arctan(2.0**a)) / + (4.0**(-a) + 1)) + assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0), + tabledValue, error_tolerance=1.9e-8) + + def test_b_less_than_a(self): + def f(x, p, q): + return p * np.exp(-q*x) + + val_1, err_1 = quad(f, 0, np.inf, args=(2, 3)) + val_2, err_2 = quad(f, np.inf, 0, args=(2, 3)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_2(self): + def f(x, s): + return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s) + + val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,)) + val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_3(self): + def f(x): + return 1.0 + + val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0)) + val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0)) + assert_allclose(val_1, -val_2, atol=max(err_1, err_2)) + + def test_b_less_than_a_full_output(self): + def f(x): + return 1.0 + + res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True) + res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True) + err = max(res_1[1], res_2[1]) + assert_allclose(res_1[0], -res_2[0], atol=err) + + def test_double_integral(self): + # 8) Double Integral test + def simpfunc(y, x): # Note order of arguments. 
+ return x+y + + a, b = 1.0, 2.0 + assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x), + 5/6.0 * (b**3.0-a**3.0)) + + def test_double_integral2(self): + def func(x0, x1, t0, t1): + return x0 + x1 + t0 + t1 + def g(x): + return x + def h(x): + return 2 * x + args = 1, 2 + assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5) + + def test_double_integral3(self): + def func(x0, x1): + return x0 + x1 + 1 + 2 + assert_quad(dblquad(func, 1, 2, 1, 2),6.) + + @pytest.mark.parametrize( + "x_lower, x_upper, y_lower, y_upper, expected", + [ + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, 0] for all n. + (-np.inf, 0, -np.inf, 0, np.pi / 4), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (one at a time). + (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)), + (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, -1] for all n. + (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (one at a time). + (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, 1] for all n. + (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [-inf, -1] and Dy = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [-inf, 1] and Dy = [-inf, -1]. 
+ (-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [0, inf] for all n. + (0, np.inf, 0, np.inf, np.pi / 4), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [1, inf] for each n (one at a time). + (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)), + (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [1, inf] for all n. + (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (one at a time). + (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)), + (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-1, inf] for all n. + (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [-1, inf] and Dy = [1, inf]. + (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain Dx = [1, inf] and Dy = [-1, inf]. + (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))), + # Multiple integration of a function in n = 2 variables: f(x, y, z) + # over domain D = [-inf, inf] for all n. + (-np.inf, np.inf, -np.inf, np.inf, np.pi) + ] + ) + def test_double_integral_improper( + self, x_lower, x_upper, y_lower, y_upper, expected + ): + # The Gaussian Integral. + def f(x, y): + return np.exp(-x ** 2 - y ** 2) + + assert_quad( + dblquad(f, x_lower, x_upper, y_lower, y_upper), + expected, + error_tolerance=3e-8 + ) + + def test_triple_integral(self): + # 9) Triple Integral test + def simpfunc(z, y, x, t): # Note order of arguments. 
+ return (x+y+z)*t + + a, b = 1.0, 2.0 + assert_quad(tplquad(simpfunc, a, b, + lambda x: x, lambda x: 2*x, + lambda x, y: x - y, lambda x, y: x + y, + (2.,)), + 2*8/3.0 * (b**4.0 - a**4.0)) + + @pytest.mark.xslow + @pytest.mark.parametrize( + "x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected", + [ + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 0] for all n. + (-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (one at a time). + (-np.inf, -1, -np.inf, 0, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (-np.inf, 0, -np.inf, -1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (-np.inf, 0, -np.inf, 0, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for each n (two at a time). + (-np.inf, -1, -np.inf, -1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (-np.inf, -1, -np.inf, 0, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (-np.inf, 0, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, -1] for all n. + (-np.inf, -1, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1]. 
+ (-np.inf, -1, -np.inf, -1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1]. + (-np.inf, -1, -np.inf, 1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1]. + (-np.inf, 1, -np.inf, 1, -np.inf, -1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1]. + (-np.inf, 1, -np.inf, -1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (one at a time). + (-np.inf, 1, -np.inf, 0, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (-np.inf, 0, -np.inf, 0, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for each n (two at a time). + (-np.inf, 1, -np.inf, 1, -np.inf, 0, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-np.inf, 1, -np.inf, 0, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-np.inf, 0, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, 1] for all n. 
+ (-np.inf, 1, -np.inf, 1, -np.inf, 1, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [0, inf] for all n. + (0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for each n (one at a time). + (1, np.inf, 0, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (0, np.inf, 1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + (0, np.inf, 0, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * erfc(1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for each n (two at a time). + (1, np.inf, 1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (1, np.inf, 0, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + (0, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [1, inf] for all n. + (1, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (one at a time). + (-1, np.inf, 0, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (0, np.inf, -1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + (0, np.inf, 0, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for each n (two at a time). 
+ (-1, np.inf, -1, np.inf, 0, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (-1, np.inf, 0, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + (0, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-1, inf] for all n. + (-1, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [1, inf] and Dy = Dz = [-1, inf]. + (1, np.inf, -1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [1, inf] and Dz = [-1, inf]. + (1, np.inf, 1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [1, inf] and Dy = [-1, inf]. + (1, np.inf, -1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = [-1, inf] and Dy = Dz = [1, inf]. + (-1, np.inf, 1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dy = [-1, inf] and Dz = [1, inf]. + (-1, np.inf, -1, np.inf, 1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain Dx = Dz = [-1, inf] and Dy = [1, inf]. + (-1, np.inf, 1, np.inf, -1, np.inf, + (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))), + # Multiple integration of a function in n = 3 variables: f(x, y, z) + # over domain D = [-inf, inf] for all n. 
+ (-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, + np.pi ** (3 / 2)), + ], + ) + def test_triple_integral_improper( + self, + x_lower, + x_upper, + y_lower, + y_upper, + z_lower, + z_upper, + expected + ): + # The Gaussian Integral. + def f(x, y, z): + return np.exp(-x ** 2 - y ** 2 - z ** 2) + + assert_quad( + tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper), + expected, + error_tolerance=6e-8 + ) + + def test_complex(self): + def tfunc(x): + return np.exp(1j*x) + + assert np.allclose( + quad(tfunc, 0, np.pi/2, complex_func=True)[0], + 1+1j) + + # We consider a divergent case in order to force quadpack + # to return an error message. The output is compared + # against what is returned by explicit integration + # of the parts. + kwargs = {'a': 0, 'b': np.inf, 'full_output': True, + 'weight': 'cos', 'wvar': 1} + res_c = quad(tfunc, complex_func=True, **kwargs) + res_r = quad(lambda x: np.real(np.exp(1j*x)), + complex_func=False, + **kwargs) + res_i = quad(lambda x: np.imag(np.exp(1j*x)), + complex_func=False, + **kwargs) + + np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0]) + np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1]) + + assert len(res_c[2]['real']) == len(res_r[2:]) == 3 + assert res_c[2]['real'][2] == res_r[4] + assert res_c[2]['real'][1] == res_r[3] + assert res_c[2]['real'][0]['lst'] == res_r[2]['lst'] + + assert len(res_c[2]['imag']) == len(res_i[2:]) == 1 + assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst'] + + +class TestNQuad: + @pytest.mark.fail_slow(5) + def test_fixed_limits(self): + def func1(x0, x1, x2, x3): + val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) + + (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0)) + return val + + def opts_basic(*args): + return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]} + + res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]], + opts=[opts_basic, {}, {}, {}], full_output=True) + assert_quad(res[:-1], 1.5267454070738635) + assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 
4e5) + + @pytest.mark.fail_slow(5) + def test_variable_limits(self): + scale = .1 + + def func2(x0, x1, x2, x3, t0, t1): + val = (x0*x1*x3**2 + np.sin(x2) + 1 + + (1 if x0 + t1*x1 - t0 > 0 else 0)) + return val + + def lim0(x1, x2, x3, t0, t1): + return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1, + scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1] + + def lim1(x2, x3, t0, t1): + return [scale * (t0*x2 + t1*x3) - 1, + scale * (t0*x2 + t1*x3) + 1] + + def lim2(x3, t0, t1): + return [scale * (x3 + t0**2*t1**3) - 1, + scale * (x3 + t0**2*t1**3) + 1] + + def lim3(t0, t1): + return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1] + + def opts0(x1, x2, x3, t0, t1): + return {'points': [t0 - t1*x1]} + + def opts1(x2, x3, t0, t1): + return {} + + def opts2(x3, t0, t1): + return {} + + def opts3(t0, t1): + return {} + + res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0), + opts=[opts0, opts1, opts2, opts3]) + assert_quad(res, 25.066666666666663) + + def test_square_separate_ranges_and_opts(self): + def f(y, x): + return 1.0 + + assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0) + + def test_square_aliased_ranges_and_opts(self): + def f(y, x): + return 1.0 + + r = [-1, 1] + opt = {} + assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0) + + def test_square_separate_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range0(*args): + return (-1, 1) + + def fn_range1(*args): + return (-1, 1) + + def fn_opt0(*args): + return {} + + def fn_opt1(*args): + return {} + + ranges = [fn_range0, fn_range1] + opts = [fn_opt0, fn_opt1] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_square_aliased_fn_ranges_and_opts(self): + def f(y, x): + return 1.0 + + def fn_range(*args): + return (-1, 1) + + def fn_opt(*args): + return {} + + ranges = [fn_range, fn_range] + opts = [fn_opt, fn_opt] + assert_quad(nquad(f, ranges, opts=opts), 4.0) + + def test_matching_quad(self): + def func(x): + return x**2 + 1 + + res, reserr = quad(func, 0, 4) + res2, 
reserr2 = nquad(func, ranges=[[0, 4]]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_dblquad(self): + def func2d(x0, x1): + return x0**2 + x1**3 - x0 * x1 + 1 + + res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3) + res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)]) + assert_almost_equal(res, res2) + assert_almost_equal(reserr, reserr2) + + def test_matching_tplquad(self): + def func3d(x0, x1, x2, c0, c1): + return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2) + + res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2, + lambda x, y: -np.pi, lambda x, y: np.pi, + args=(2, 3)) + res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3)) + assert_almost_equal(res, res2) + + def test_dict_as_opts(self): + try: + nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001}) + except TypeError: + assert False + diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..2237a12f1ba53e343e27c6853e1d613a3641b55c --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_quadrature.py @@ -0,0 +1,732 @@ +# mypy: disable-error-code="attr-defined" +import pytest +import numpy as np +from numpy.testing import assert_equal, assert_almost_equal, assert_allclose +from hypothesis import given +import hypothesis.strategies as st +import hypothesis.extra.numpy as hyp_num + +from scipy.integrate import (romb, newton_cotes, + cumulative_trapezoid, trapezoid, + quad, simpson, fixed_quad, + qmc_quad, cumulative_simpson) +from scipy.integrate._quadrature import _cumulative_simpson_unequal_intervals + +from scipy import stats, special, integrate +from scipy.conftest import array_api_compatible, skip_xp_invalid_arg +from scipy._lib._array_api_no_0d import xp_assert_close + 
+skip_xp_backends = pytest.mark.skip_xp_backends + + +class TestFixedQuad: + def test_scalar(self): + n = 4 + expected = 1/(2*n) + got, _ = fixed_quad(lambda x: x**(2*n - 1), 0, 1, n=n) + # quadrature exact for this input + assert_allclose(got, expected, rtol=1e-12) + + def test_vector(self): + n = 4 + p = np.arange(1, 2*n) + expected = 1/(p + 1) + got, _ = fixed_quad(lambda x: x**p[:, None], 0, 1, n=n) + assert_allclose(got, expected, rtol=1e-12) + + +class TestQuadrature: + def quad(self, x, a, b, args): + raise NotImplementedError + + def test_romb(self): + assert_equal(romb(np.arange(17)), 128) + + def test_romb_gh_3731(self): + # Check that romb makes maximal use of data points + x = np.arange(2**4+1) + y = np.cos(0.2*x) + val = romb(y) + val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max()) + assert_allclose(val, val2, rtol=1e-8, atol=0) + + def test_newton_cotes(self): + """Test the first few degrees, for evenly spaced points.""" + n = 1 + wts, errcoff = newton_cotes(n, 1) + assert_equal(wts, n*np.array([0.5, 0.5])) + assert_almost_equal(errcoff, -n**3/12.0) + + n = 2 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0) + assert_almost_equal(errcoff, -n**5/2880.0) + + n = 3 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0) + assert_almost_equal(errcoff, -n**5/6480.0) + + n = 4 + wts, errcoff = newton_cotes(n, 1) + assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0) + assert_almost_equal(errcoff, -n**7/1935360.0) + + def test_newton_cotes2(self): + """Test newton_cotes with points that are not evenly spaced.""" + + x = np.array([0.0, 1.5, 2.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 8.0/3 + numeric_integral = np.dot(wts, y) + assert_almost_equal(numeric_integral, exact_integral) + + x = np.array([0.0, 1.4, 2.1, 3.0]) + y = x**2 + wts, errcoff = newton_cotes(x) + exact_integral = 9.0 + numeric_integral = np.dot(wts, y) 
+ assert_almost_equal(numeric_integral, exact_integral) + + def test_simpson(self): + y = np.arange(17) + assert_equal(simpson(y), 128) + assert_equal(simpson(y, dx=0.5), 64) + assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32) + + # integral should be exactly 21 + x = np.linspace(1, 4, 4) + def f(x): + return x**2 + + assert_allclose(simpson(f(x), x=x), 21.0) + + # integral should be exactly 114 + x = np.linspace(1, 7, 4) + assert_allclose(simpson(f(x), dx=2.0), 114) + + # test multi-axis behaviour + a = np.arange(16).reshape(4, 4) + x = np.arange(64.).reshape(4, 4, 4) + y = f(x) + for i in range(3): + r = simpson(y, x=x, axis=i) + it = np.nditer(a, flags=['multi_index']) + for _ in it: + idx = list(it.multi_index) + idx.insert(i, slice(None)) + integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3 + assert_allclose(r[it.multi_index], integral) + + # test when integration axis only has two points + x = np.arange(16).reshape(8, 2) + y = f(x) + r = simpson(y, x=x, axis=-1) + + integral = 0.5 * (y[:, 1] + y[:, 0]) * (x[:, 1] - x[:, 0]) + assert_allclose(r, integral) + + # odd points, test multi-axis behaviour + a = np.arange(25).reshape(5, 5) + x = np.arange(125).reshape(5, 5, 5) + y = f(x) + for i in range(3): + r = simpson(y, x=x, axis=i) + it = np.nditer(a, flags=['multi_index']) + for _ in it: + idx = list(it.multi_index) + idx.insert(i, slice(None)) + integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3 + assert_allclose(r[it.multi_index], integral) + + # Tests for checking base case + x = np.array([3]) + y = np.power(x, 2) + assert_allclose(simpson(y, x=x, axis=0), 0.0) + assert_allclose(simpson(y, x=x, axis=-1), 0.0) + + x = np.array([3, 3, 3, 3]) + y = np.power(x, 2) + assert_allclose(simpson(y, x=x, axis=0), 0.0) + assert_allclose(simpson(y, x=x, axis=-1), 0.0) + + x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]]) + y = np.power(x, 2) + zero_axis = [0.0, 0.0, 0.0, 0.0] + default_axis = [170 + 1/3] * 3 # 8**3 / 3 - 1/3 + 
assert_allclose(simpson(y, x=x, axis=0), zero_axis) + # the following should be exact + assert_allclose(simpson(y, x=x, axis=-1), default_axis) + + x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]]) + y = np.power(x, 2) + zero_axis = [0.0, 136.0, 1088.0, 8704.0] + default_axis = [170 + 1/3, 170 + 1/3, 32**3 / 3 - 1/3] + assert_allclose(simpson(y, x=x, axis=0), zero_axis) + assert_allclose(simpson(y, x=x, axis=-1), default_axis) + + + @pytest.mark.parametrize('droplast', [False, True]) + def test_simpson_2d_integer_no_x(self, droplast): + # The inputs are 2d integer arrays. The results should be + # identical to the results when the inputs are floating point. + y = np.array([[2, 2, 4, 4, 8, 8, -4, 5], + [4, 4, 2, -4, 10, 22, -2, 10]]) + if droplast: + y = y[:, :-1] + result = simpson(y, axis=-1) + expected = simpson(np.array(y, dtype=np.float64), axis=-1) + assert_equal(result, expected) + + +class TestCumulative_trapezoid: + def test_1d(self): + x = np.linspace(-2, 2, num=5) + y = x + y_int = cumulative_trapezoid(y, x, initial=0) + y_expected = [0., -1.5, -2., -1.5, 0.] 
+ assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, x, initial=None) + assert_allclose(y_int, y_expected[1:]) + + def test_y_nd_x_nd(self): + x = np.arange(3 * 2 * 4).reshape(3, 2, 4) + y = x + y_int = cumulative_trapezoid(y, x, initial=0) + y_expected = np.array([[[0., 0.5, 2., 4.5], + [0., 4.5, 10., 16.5]], + [[0., 8.5, 18., 28.5], + [0., 12.5, 26., 40.5]], + [[0., 16.5, 34., 52.5], + [0., 20.5, 42., 64.5]]]) + + assert_allclose(y_int, y_expected) + + # Try with all axes + shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)] + for axis, shape in zip([0, 1, 2], shapes): + y_int = cumulative_trapezoid(y, x, initial=0, axis=axis) + assert_equal(y_int.shape, (3, 2, 4)) + y_int = cumulative_trapezoid(y, x, initial=None, axis=axis) + assert_equal(y_int.shape, shape) + + def test_y_nd_x_1d(self): + y = np.arange(3 * 2 * 4).reshape(3, 2, 4) + x = np.arange(4)**2 + # Try with all axes + ys_expected = ( + np.array([[[4., 5., 6., 7.], + [8., 9., 10., 11.]], + [[40., 44., 48., 52.], + [56., 60., 64., 68.]]]), + np.array([[[2., 3., 4., 5.]], + [[10., 11., 12., 13.]], + [[18., 19., 20., 21.]]]), + np.array([[[0.5, 5., 17.5], + [4.5, 21., 53.5]], + [[8.5, 37., 89.5], + [12.5, 53., 125.5]], + [[16.5, 69., 161.5], + [20.5, 85., 197.5]]])) + + for axis, y_expected in zip([0, 1, 2], ys_expected): + y_int = cumulative_trapezoid(y, x=x[:y.shape[axis]], axis=axis, + initial=None) + assert_allclose(y_int, y_expected) + + def test_x_none(self): + y = np.linspace(-2, 2, num=5) + + y_int = cumulative_trapezoid(y) + y_expected = [-1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, initial=0) + y_expected = [0, -1.5, -2., -1.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, dx=3) + y_expected = [-4.5, -6., -4.5, 0.] + assert_allclose(y_int, y_expected) + + y_int = cumulative_trapezoid(y, dx=3, initial=0) + y_expected = [0, -4.5, -6., -4.5, 0.] 
+ assert_allclose(y_int, y_expected) + + @pytest.mark.parametrize( + "initial", [1, 0.5] + ) + def test_initial_error(self, initial): + """If initial is not None or 0, a ValueError is raised.""" + y = np.linspace(0, 10, num=10) + with pytest.raises(ValueError, match="`initial`"): + cumulative_trapezoid(y, initial=initial) + + def test_zero_len_y(self): + with pytest.raises(ValueError, match="At least one point is required"): + cumulative_trapezoid(y=[]) + + +@array_api_compatible +class TestTrapezoid: + def test_simple(self, xp): + x = xp.arange(-10, 10, .1) + r = trapezoid(xp.exp(-.5 * x ** 2) / xp.sqrt(2 * xp.asarray(xp.pi)), dx=0.1) + # check integral of normal equals 1 + xp_assert_close(r, xp.asarray(1.0)) + + @skip_xp_backends('jax.numpy', + reasons=["JAX arrays do not support item assignment"]) + @pytest.mark.usefixtures("skip_xp_backends") + def test_ndim(self, xp): + x = xp.linspace(0, 1, 3) + y = xp.linspace(0, 2, 8) + z = xp.linspace(0, 3, 13) + + wx = xp.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = xp.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = xp.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None,:, None] + z[None, None,:] + + qx = xp.sum(q * wx[:, None, None], axis=0) + qy = xp.sum(q * wy[None, :, None], axis=1) + qz = xp.sum(q * wz[None, None, :], axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + xp_assert_close(r, qx) + r = trapezoid(q, x=y[None,:, None], axis=1) + xp_assert_close(r, qy) + r = trapezoid(q, x=z[None, None,:], axis=2) + xp_assert_close(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + xp_assert_close(r, qx) + r = trapezoid(q, x=y, axis=1) + xp_assert_close(r, qy) + r = trapezoid(q, x=z, axis=2) + xp_assert_close(r, qz) + + @skip_xp_backends('jax.numpy', + reasons=["JAX arrays do not support item assignment"]) + @pytest.mark.usefixtures("skip_xp_backends") + def test_gh21908(self, xp): + # extended testing for n-dim arrays + x = 
xp.reshape(xp.linspace(0, 29, 30), (3, 10)) + y = xp.reshape(xp.linspace(0, 29, 30), (3, 10)) + + out0 = xp.linspace(200, 380, 10) + xp_assert_close(trapezoid(y, x=x, axis=0), out0) + xp_assert_close(trapezoid(y, x=xp.asarray([0, 10., 20.]), axis=0), out0) + # x needs to be broadcastable against y + xp_assert_close( + trapezoid(y, x=xp.asarray([0, 10., 20.])[:, None], axis=0), + out0 + ) + with pytest.raises(Exception): + # x is not broadcastable against y + trapezoid(y, x=xp.asarray([0, 10., 20.])[None, :], axis=0) + + out1 = xp.asarray([ 40.5, 130.5, 220.5]) + xp_assert_close(trapezoid(y, x=x, axis=1), out1) + xp_assert_close( + trapezoid(y, x=xp.linspace(0, 9, 10), axis=1), + out1 + ) + + @skip_xp_invalid_arg + def test_masked(self, xp): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_allclose(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_allclose(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_allclose(trapezoid(y, xm), r) + + @skip_xp_backends(np_only=True, + reasons=['array-likes only supported for NumPy backend']) + @pytest.mark.usefixtures("skip_xp_backends") + def test_array_like(self, xp): + x = list(range(5)) + y = [t * t for t in x] + xarr = xp.asarray(x, dtype=xp.float64) + yarr = xp.asarray(y, dtype=xp.float64) + res = trapezoid(y, x) + resarr = trapezoid(yarr, xarr) + xp_assert_close(res, resarr) + + +class TestQMCQuad: + @pytest.mark.thread_unsafe + def test_input_validation(self): + message = "`func` must be callable." + with pytest.raises(TypeError, match=message): + qmc_quad("a duck", [0, 0], [1, 1]) + + message = "`func` must evaluate the integrand at points..." 
+ with pytest.raises(ValueError, match=message): + qmc_quad(lambda: 1, [0, 0], [1, 1]) + + def func(x): + assert x.ndim == 1 + return np.sum(x) + message = "Exception encountered when attempting vectorized call..." + with pytest.warns(UserWarning, match=message): + qmc_quad(func, [0, 0], [1, 1]) + + message = "`n_points` must be an integer." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5) + + message = "`n_estimates` must be an integer." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5) + + message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine." + with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck") + + message = "`qrng` must be initialized with dimensionality equal to " + with pytest.raises(ValueError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1)) + + message = r"`log` must be boolean \(`True` or `False`\)." 
+ with pytest.raises(TypeError, match=message): + qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10) + + def basic_test(self, n_points=2**8, n_estimates=8, signs=None): + if signs is None: + signs = np.ones(2) + ndim = 2 + mean = np.zeros(ndim) + cov = np.eye(ndim) + + def func(x): + return stats.multivariate_normal.pdf(x.T, mean, cov) + + rng = np.random.default_rng(2879434385674690281) + qrng = stats.qmc.Sobol(ndim, seed=rng) + a = np.zeros(ndim) + b = np.ones(ndim) * signs + res = qmc_quad(func, a, b, n_points=n_points, + n_estimates=n_estimates, qrng=qrng) + ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a) + atol = special.stdtrit(n_estimates-1, 0.995) * res.standard_error # 99% CI + assert_allclose(res.integral, ref, atol=atol) + assert np.prod(signs)*res.integral > 0 + + rng = np.random.default_rng(2879434385674690281) + qrng = stats.qmc.Sobol(ndim, seed=rng) + logres = qmc_quad(lambda *args: np.log(func(*args)), a, b, + n_points=n_points, n_estimates=n_estimates, + log=True, qrng=qrng) + assert_allclose(np.exp(logres.integral), res.integral, rtol=1e-14) + assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0) + assert_allclose(np.exp(logres.standard_error), + res.standard_error, rtol=1e-14, atol=1e-16) + + @pytest.mark.parametrize("n_points", [2**8, 2**12]) + @pytest.mark.parametrize("n_estimates", [8, 16]) + def test_basic(self, n_points, n_estimates): + self.basic_test(n_points, n_estimates) + + @pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]]) + def test_sign(self, signs): + self.basic_test(signs=signs) + + @pytest.mark.thread_unsafe + @pytest.mark.parametrize("log", [False, True]) + def test_zero(self, log): + message = "A lower limit was equal to an upper limit, so" + with pytest.warns(UserWarning, match=message): + res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log) + assert res.integral == (-np.inf if log else 0) + assert res.standard_error == 0 + + def test_flexible_input(self): + # check that qrng 
is not required + # also checks that for 1d problems, a and b can be scalars + def func(x): + return stats.norm.pdf(x, scale=2) + + res = qmc_quad(func, 0, 1) + ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2) + assert_allclose(res.integral, ref, 1e-2) + + +def cumulative_simpson_nd_reference(y, *, x=None, dx=None, initial=None, axis=-1): + # Use cumulative_trapezoid if length of y < 3 + if y.shape[axis] < 3: + if initial is None: + return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=None) + else: + return initial + cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=0) + + # Ensure that working axis is last axis + y = np.moveaxis(y, axis, -1) + x = np.moveaxis(x, axis, -1) if np.ndim(x) > 1 else x + dx = np.moveaxis(dx, axis, -1) if np.ndim(dx) > 1 else dx + initial = np.moveaxis(initial, axis, -1) if np.ndim(initial) > 1 else initial + + # If `x` is not present, create it from `dx` + n = y.shape[-1] + x = dx * np.arange(n) if dx is not None else x + # Similarly, if `initial` is not present, set it to 0 + initial_was_none = initial is None + initial = 0 if initial_was_none else initial + + # `np.apply_along_axis` accepts only one array, so concatenate arguments + x = np.broadcast_to(x, y.shape) + initial = np.broadcast_to(initial, y.shape[:-1] + (1,)) + z = np.concatenate((y, x, initial), axis=-1) + + # Use `np.apply_along_axis` to compute result + def f(z): + return cumulative_simpson(z[:n], x=z[n:2*n], initial=z[2*n:]) + res = np.apply_along_axis(f, -1, z) + + # Remove `initial` and undo axis move as needed + res = res[..., 1:] if initial_was_none else res + res = np.moveaxis(res, -1, axis) + return res + + +class TestCumulativeSimpson: + x0 = np.arange(4) + y0 = x0**2 + + @pytest.mark.parametrize('use_dx', (False, True)) + @pytest.mark.parametrize('use_initial', (False, True)) + def test_1d(self, use_dx, use_initial): + # Test for exact agreement with polynomial of highest + # possible order (3 if `dx` is constant, 2 otherwise). 
+ rng = np.random.default_rng(82456839535679456794) + n = 10 + + # Generate random polynomials and ground truth + # integral of appropriate order + order = 3 if use_dx else 2 + dx = rng.random() + x = (np.sort(rng.random(n)) if order == 2 + else np.arange(n)*dx + rng.random()) + i = np.arange(order + 1)[:, np.newaxis] + c = rng.random(order + 1)[:, np.newaxis] + y = np.sum(c*x**i, axis=0) + Y = np.sum(c*x**(i + 1)/(i + 1), axis=0) + ref = Y if use_initial else (Y-Y[0])[1:] + + # Integrate with `cumulative_simpson` + initial = Y[0] if use_initial else None + kwarg = {'dx': dx} if use_dx else {'x': x} + res = cumulative_simpson(y, **kwarg, initial=initial) + + # Compare result against reference + if not use_dx: + assert_allclose(res, ref, rtol=2e-15) + else: + i0 = 0 if use_initial else 1 + # all terms are "close" + assert_allclose(res, ref, rtol=0.0025) + # only even-interval terms are "exact" + assert_allclose(res[i0::2], ref[i0::2], rtol=2e-15) + + @pytest.mark.parametrize('axis', np.arange(-3, 3)) + @pytest.mark.parametrize('x_ndim', (1, 3)) + @pytest.mark.parametrize('x_len', (1, 2, 7)) + @pytest.mark.parametrize('i_ndim', (None, 0, 3,)) + @pytest.mark.parametrize('dx', (None, True)) + def test_nd(self, axis, x_ndim, x_len, i_ndim, dx): + # Test behavior of `cumulative_simpson` with N-D `y` + rng = np.random.default_rng(82456839535679456794) + + # determine shapes + shape = [5, 6, x_len] + shape[axis], shape[-1] = shape[-1], shape[axis] + shape_len_1 = shape.copy() + shape_len_1[axis] = 1 + i_shape = shape_len_1 if i_ndim == 3 else () + + # initialize arguments + y = rng.random(size=shape) + x, dx = None, None + if dx: + dx = rng.random(size=shape_len_1) if x_ndim > 1 else rng.random() + else: + x = (np.sort(rng.random(size=shape), axis=axis) if x_ndim > 1 + else np.sort(rng.random(size=shape[axis]))) + initial = None if i_ndim is None else rng.random(size=i_shape) + + # compare results + res = cumulative_simpson(y, x=x, dx=dx, initial=initial, axis=axis) + ref 
= cumulative_simpson_nd_reference(y, x=x, dx=dx, initial=initial, axis=axis) + np.testing.assert_allclose(res, ref, rtol=1e-15) + + @pytest.mark.parametrize(('message', 'kwarg_update'), [ + ("x must be strictly increasing", dict(x=[2, 2, 3, 4])), + ("x must be strictly increasing", dict(x=[x0, [2, 2, 4, 8]], y=[y0, y0])), + ("x must be strictly increasing", dict(x=[x0, x0, x0], y=[y0, y0, y0], axis=0)), + ("At least one point is required", dict(x=[], y=[])), + ("`axis=4` is not valid for `y` with `y.ndim=1`", dict(axis=4)), + ("shape of `x` must be the same as `y` or 1-D", dict(x=np.arange(5))), + ("`initial` must either be a scalar or...", dict(initial=np.arange(5))), + ("`dx` must either be a scalar or...", dict(x=None, dx=np.arange(5))), + ]) + def test_simpson_exceptions(self, message, kwarg_update): + kwargs0 = dict(y=self.y0, x=self.x0, dx=None, initial=None, axis=-1) + with pytest.raises(ValueError, match=message): + cumulative_simpson(**dict(kwargs0, **kwarg_update)) + + def test_special_cases(self): + # Test special cases not checked elsewhere + rng = np.random.default_rng(82456839535679456794) + y = rng.random(size=10) + res = cumulative_simpson(y, dx=0) + assert_equal(res, 0) + + # Should add tests of: + # - all elements of `x` identical + # These should work as they do for `simpson` + + def _get_theoretical_diff_between_simps_and_cum_simps(self, y, x): + """`cumulative_simpson` and `simpson` can be tested against other to verify + they give consistent results. `simpson` will iteratively be called with + successively higher upper limits of integration. This function calculates + the theoretical correction required to `simpson` at even intervals to match + with `cumulative_simpson`. 
+ """ + d = np.diff(x, axis=-1) + sub_integrals_h1 = _cumulative_simpson_unequal_intervals(y, d) + sub_integrals_h2 = _cumulative_simpson_unequal_intervals( + y[..., ::-1], d[..., ::-1] + )[..., ::-1] + + # Concatenate to build difference array + zeros_shape = (*y.shape[:-1], 1) + theoretical_difference = np.concatenate( + [ + np.zeros(zeros_shape), + (sub_integrals_h1[..., 1:] - sub_integrals_h2[..., :-1]), + np.zeros(zeros_shape), + ], + axis=-1, + ) + # Differences only expected at even intervals. Odd intervals will + # match exactly so there is no correction + theoretical_difference[..., 1::2] = 0.0 + # Note: the first interval will not match from this correction as + # `simpson` uses the trapezoidal rule + return theoretical_difference + + @pytest.mark.thread_unsafe + @pytest.mark.slow + @given( + y=hyp_num.arrays( + np.float64, + hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10), + elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7) + ) + ) + def test_cumulative_simpson_against_simpson_with_default_dx( + self, y + ): + """Theoretically, the output of `cumulative_simpson` will be identical + to `simpson` at all even indices and in the last index. The first index + will not match as `simpson` uses the trapezoidal rule when there are only two + data points. 
Odd indices after the first index are shown to match with + a mathematically-derived correction.""" + def simpson_reference(y): + return np.stack( + [simpson(y[..., :i], dx=1.0) for i in range(2, y.shape[-1]+1)], axis=-1, + ) + + res = cumulative_simpson(y, dx=1.0) + ref = simpson_reference(y) + theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps( + y, x=np.arange(y.shape[-1]) + ) + np.testing.assert_allclose( + res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:] + ) + + @pytest.mark.thread_unsafe + @pytest.mark.slow + @given( + y=hyp_num.arrays( + np.float64, + hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10), + elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7) + ) + ) + def test_cumulative_simpson_against_simpson( + self, y + ): + """Theoretically, the output of `cumulative_simpson` will be identical + to `simpson` at all even indices and in the last index. The first index + will not match as `simpson` uses the trapezoidal rule when there are only two + data points. Odd indices after the first index are shown to match with + a mathematically-derived correction.""" + interval = 10/(y.shape[-1] - 1) + x = np.linspace(0, 10, num=y.shape[-1]) + x[1:] = x[1:] + 0.2*interval*np.random.uniform(-1, 1, len(x) - 1) + + def simpson_reference(y, x): + return np.stack( + [simpson(y[..., :i], x=x[..., :i]) for i in range(2, y.shape[-1]+1)], + axis=-1, + ) + + res = cumulative_simpson(y, x=x) + ref = simpson_reference(y, x) + theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps( + y, x + ) + np.testing.assert_allclose( + res[..., 1:], ref[..., 1:] + theoretical_difference[..., 1:] + ) + +class TestLebedev: + def test_input_validation(self): + # only certain rules are available + message = "Order n=-1 not available..." 
+ with pytest.raises(NotImplementedError, match=message): + integrate.lebedev_rule(-1) + + def test_quadrature(self): + # Test points/weights to integrate an example function + + def f(x): + return np.exp(x[0]) + + x, w = integrate.lebedev_rule(15) + res = w @ f(x) + ref = 14.7680137457653 # lebedev_rule reference [3] + assert_allclose(res, ref, rtol=1e-14) + assert_allclose(np.sum(w), 4 * np.pi) + + @pytest.mark.parametrize('order', list(range(3, 32, 2)) + list(range(35, 132, 6))) + def test_properties(self, order): + x, w = integrate.lebedev_rule(order) + # dispersion should be maximal; no clear spherical mean + with np.errstate(divide='ignore', invalid='ignore'): + res = stats.directional_stats(x.T, axis=0) + assert_allclose(res.mean_resultant_length, 0, atol=1e-15) + # weights should sum to 4*pi (surface area of unit sphere) + assert_allclose(np.sum(w), 4*np.pi) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c05990457e1e9859bf44808dc029061eab4dd7 --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py @@ -0,0 +1,1153 @@ +# mypy: disable-error-code="attr-defined" +import os +import pytest +import math + +import numpy as np +from numpy.testing import assert_allclose + +from scipy.conftest import array_api_compatible +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._array_api_no_0d import xp_assert_close, xp_assert_equal +from scipy._lib._array_api import array_namespace, xp_size, xp_ravel, xp_copy, is_numpy +from scipy import special, stats +from scipy.integrate import quad_vec, nsum, tanhsinh as _tanhsinh +from scipy.integrate._tanhsinh import _pair_cache +from scipy.stats._discrete_distns import _gen_harmonic_gt1 + + +def norm_pdf(x, xp=None): + xp = array_namespace(x) if xp is None else xp + 
return 1/(2*xp.pi)**0.5 * xp.exp(-x**2/2) + +def norm_logpdf(x, xp=None): + xp = array_namespace(x) if xp is None else xp + return -0.5*math.log(2*xp.pi) - x**2/2 + + +def _vectorize(xp): + # xp-compatible version of np.vectorize + # assumes arguments are all arrays of the same shape + def decorator(f): + def wrapped(*arg_arrays): + shape = arg_arrays[0].shape + arg_arrays = [xp_ravel(arg_array) for arg_array in arg_arrays] + res = [] + for i in range(math.prod(shape)): + arg_scalars = [arg_array[i] for arg_array in arg_arrays] + res.append(f(*arg_scalars)) + return res + + return wrapped + + return decorator + + +@array_api_compatible +@pytest.mark.usefixtures("skip_xp_backends") +@pytest.mark.skip_xp_backends( + 'array_api_strict', reason='Currently uses fancy indexing assignment.' +) +@pytest.mark.skip_xp_backends( + 'jax.numpy', reason='JAX arrays do not support item assignment.' +) +class TestTanhSinh: + + # Test problems from [1] Section 6 + def f1(self, t): + return t * np.log(1 + t) + + f1.ref = 0.25 + f1.b = 1 + + def f2(self, t): + return t ** 2 * np.arctan(t) + + f2.ref = (np.pi - 2 + 2 * np.log(2)) / 12 + f2.b = 1 + + def f3(self, t): + return np.exp(t) * np.cos(t) + + f3.ref = (np.exp(np.pi / 2) - 1) / 2 + f3.b = np.pi / 2 + + def f4(self, t): + a = np.sqrt(2 + t ** 2) + return np.arctan(a) / ((1 + t ** 2) * a) + + f4.ref = 5 * np.pi ** 2 / 96 + f4.b = 1 + + def f5(self, t): + return np.sqrt(t) * np.log(t) + + f5.ref = -4 / 9 + f5.b = 1 + + def f6(self, t): + return np.sqrt(1 - t ** 2) + + f6.ref = np.pi / 4 + f6.b = 1 + + def f7(self, t): + return np.sqrt(t) / np.sqrt(1 - t ** 2) + + f7.ref = 2 * np.sqrt(np.pi) * special.gamma(3 / 4) / special.gamma(1 / 4) + f7.b = 1 + + def f8(self, t): + return np.log(t) ** 2 + + f8.ref = 2 + f8.b = 1 + + def f9(self, t): + return np.log(np.cos(t)) + + f9.ref = -np.pi * np.log(2) / 2 + f9.b = np.pi / 2 + + def f10(self, t): + return np.sqrt(np.tan(t)) + + f10.ref = np.pi * np.sqrt(2) / 2 + f10.b = np.pi / 2 + + def 
f11(self, t): + return 1 / (1 + t ** 2) + + f11.ref = np.pi / 2 + f11.b = np.inf + + def f12(self, t): + return np.exp(-t) / np.sqrt(t) + + f12.ref = np.sqrt(np.pi) + f12.b = np.inf + + def f13(self, t): + return np.exp(-t ** 2 / 2) + + f13.ref = np.sqrt(np.pi / 2) + f13.b = np.inf + + def f14(self, t): + return np.exp(-t) * np.cos(t) + + f14.ref = 0.5 + f14.b = np.inf + + def f15(self, t): + return np.sin(t) / t + + f15.ref = np.pi / 2 + f15.b = np.inf + + def error(self, res, ref, log=False, xp=None): + xp = array_namespace(res, ref) if xp is None else xp + err = abs(res - ref) + + if not log: + return err + + with np.errstate(divide='ignore'): + return xp.log10(err) + + def test_input_validation(self, xp): + f = self.f1 + + zero = xp.asarray(0) + f_b = xp.asarray(f.b) + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(42, zero, f_b) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, log=2) + + message = '...must be real numbers.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, xp.asarray(1+1j), f_b) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, atol='ekki') + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, rtol=pytest) + + message = '...must be non-negative and finite.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, rtol=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, atol=xp.inf) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, rtol=xp.inf, log=True) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, atol=xp.inf, log=True) + + message = '...must be integers.' 
+ with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, maxlevel=object()) + # with pytest.raises(ValueError, match=message): # unused for now + # _tanhsinh(f, zero, f_b, maxfun=1+1j) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, minlevel="migratory coconut") + + message = '...must be non-negative.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, maxlevel=-1) + # with pytest.raises(ValueError, match=message): # unused for now + # _tanhsinh(f, zero, f_b, maxfun=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, minlevel=-1) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, preserve_shape=2) + + message = '...must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, zero, f_b, callback='elderberry') + + @pytest.mark.parametrize("limits, ref", [ + [(0, math.inf), 0.5], # b infinite + [(-math.inf, 0), 0.5], # a infinite + [(-math.inf, math.inf), 1.], # a and b infinite + [(math.inf, -math.inf), -1.], # flipped limits + [(1, -1), stats.norm.cdf(-1.) 
- stats.norm.cdf(1.)], # flipped limits + ]) + def test_integral_transforms(self, limits, ref, xp): + # Check that the integral transforms are behaving for both normal and + # log integration + limits = [xp.asarray(limit) for limit in limits] + dtype = xp.asarray(float(limits[0])).dtype + ref = xp.asarray(ref, dtype=dtype) + + res = _tanhsinh(norm_pdf, *limits) + xp_assert_close(res.integral, ref) + + logres = _tanhsinh(norm_logpdf, *limits, log=True) + xp_assert_close(xp.exp(logres.integral), ref, check_dtype=False) + # Transformation should not make the result complex unnecessarily + xp_test = array_namespace(*limits) # we need xp.isdtype + assert (xp_test.isdtype(logres.integral.dtype, "real floating") if ref > 0 + else xp_test.isdtype(logres.integral.dtype, "complex floating")) + + xp_assert_close(xp.exp(logres.error), res.error, atol=1e-16, check_dtype=False) + + # 15 skipped intentionally; it's very difficult numerically + @pytest.mark.skip_xp_backends(np_only=True, + reason='Cumbersome to convert everything.') + @pytest.mark.parametrize('f_number', range(1, 15)) + def test_basic(self, f_number, xp): + f = getattr(self, f"f{f_number}") + rtol = 2e-8 + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert_allclose(res.integral, f.ref, rtol=rtol) + if f_number not in {14}: # mildly underestimates error here + true_error = abs(self.error(res.integral, f.ref)/res.integral) + assert true_error < res.error + + if f_number in {7, 10, 12}: # succeeds, but doesn't know it + return + + assert res.success + assert res.status == 0 + + @pytest.mark.skip_xp_backends(np_only=True, + reason="Distributions aren't xp-compatible.") + @pytest.mark.parametrize('ref', (0.5, [0.4, 0.6])) + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, ref, case, xp): + distname, params = case + if distname in {'dgamma', 'dweibull', 'laplace', 'kstwo'}: + # should split up interval at first-derivative discontinuity + pytest.skip('tanh-sinh is not great for 
non-smooth integrands') + if (distname in {'studentized_range', 'levy_stable'} + and not int(os.getenv('SCIPY_XSLOW', 0))): + pytest.skip('This case passes, but it is too slow.') + dist = getattr(stats, distname)(*params) + x = dist.interval(ref) + res = _tanhsinh(dist.pdf, *x) + assert_allclose(res.integral, ref) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape, xp): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = xp.asarray(rng.random(shape)) + b = xp.asarray(rng.random(shape)) + p = xp.asarray(rng.random(shape)) + n = math.prod(shape) + + def f(x, p): + f.ncall += 1 + f.feval += 1 if (xp_size(x) == n or x.ndim <= 1) else x.shape[-1] + return x**p + f.ncall = 0 + f.feval = 0 + + @_vectorize(xp) + def _tanhsinh_single(a, b, p): + return _tanhsinh(lambda x: x**p, a, b) + + res = _tanhsinh(f, a, b, args=(p,)) + refs = _tanhsinh_single(a, b, p) + + xp_test = array_namespace(a) # need xp.stack, isdtype + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + ref_attr = xp_test.stack([getattr(ref, attr) for ref in refs]) + res_attr = xp_ravel(getattr(res, attr)) + xp_assert_close(res_attr, ref_attr, rtol=1e-15) + assert getattr(res, attr).shape == shape + + assert xp_test.isdtype(res.success.dtype, 'bool') + assert xp_test.isdtype(res.status.dtype, 'integral') + assert xp_test.isdtype(res.nfev.dtype, 'integral') + assert xp_test.isdtype(res.maxlevel.dtype, 'integral') + assert xp.max(res.nfev) == f.feval + # maxlevel = 2 -> 3 function calls (2 initialization, 1 work) + assert xp.max(res.maxlevel) >= 2 + assert xp.max(res.maxlevel) == f.ncall + + def test_flags(self, xp): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
+ def f(xs, js): + f.nit += 1 + funcs = [lambda x: xp.exp(-x**2), # converges + lambda x: xp.exp(x), # reaches maxiter due to order=2 + lambda x: xp.full_like(x, xp.nan)] # stops due to NaN + res = [] + for i in range(xp_size(js)): + x = xs[i, ...] + j = int(xp_ravel(js)[i]) + res.append(funcs[j](x)) + return xp.stack(res) + f.nit = 0 + + args = (xp.arange(3, dtype=xp.int64),) + a = xp.asarray([xp.inf]*3) + b = xp.asarray([-xp.inf] * 3) + res = _tanhsinh(f, a, b, maxlevel=5, args=args) + ref_flags = xp.asarray([0, -2, -3], dtype=xp.int32) + xp_assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self, xp): + # Same test as above but using `preserve_shape` option to simplify. + def f(x): + res = [xp.exp(-x[0]**2), # converges + xp.exp(x[1]), # reaches maxiter due to order=2 + xp.full_like(x[2], xp.nan)] # stops due to NaN + return xp.stack(res) + + a = xp.asarray([xp.inf] * 3) + b = xp.asarray([-xp.inf] * 3) + res = _tanhsinh(f, a, b, maxlevel=5, preserve_shape=True) + ref_flags = xp.asarray([0, -2, -3], dtype=xp.int32) + xp_assert_equal(res.status, ref_flags) + + def test_preserve_shape(self, xp): + # Test `preserve_shape` option + def f(x, xp): + return xp.stack([xp.stack([x, xp.sin(10 * x)]), + xp.stack([xp.cos(30 * x), x * xp.sin(100 * x)])]) + + ref = quad_vec(lambda x: f(x, np), 0, 1) + res = _tanhsinh(lambda x: f(x, xp), xp.asarray(0), xp.asarray(1), + preserve_shape=True) + dtype = xp.asarray(0.).dtype + xp_assert_close(res.integral, xp.asarray(ref[0], dtype=dtype)) + + def test_convergence(self, xp): + # demonstrate that number of accurate digits doubles each iteration + dtype = xp.float64 # this only works with good precision + def f(t): + return t * xp.log(1 + t) + ref = xp.asarray(0.25, dtype=dtype) + a, b = xp.asarray(0., dtype=dtype), xp.asarray(1., dtype=dtype) + + last_logerr = 0 + for i in range(4): + res = _tanhsinh(f, a, b, minlevel=0, maxlevel=i) + logerr = self.error(res.integral, ref, log=True, xp=xp) + assert (logerr < 
last_logerr * 2 or logerr < -15.5) + last_logerr = logerr + + def test_options_and_result_attributes(self, xp): + # demonstrate that options are behaving as advertised and status + # messages are as intended + xp_test = array_namespace(xp.asarray(1.)) # need xp.atan + + def f(x): + f.calls += 1 + f.feval += xp_size(xp.asarray(x)) + return x**2 * xp_test.atan(x) + + f.ref = xp.asarray((math.pi - 2 + 2 * math.log(2)) / 12, dtype=xp.float64) + + default_rtol = 1e-12 + default_atol = f.ref * default_rtol # effective default absolute tol + + # Keep things simpler by leaving tolerances fixed rather than + # having to make them dtype-dependent + a = xp.asarray(0., dtype=xp.float64) + b = xp.asarray(1., dtype=xp.float64) + + # Test default options + f.feval, f.calls = 0, 0 + ref = _tanhsinh(f, a, b) + assert self.error(ref.integral, f.ref) < ref.error < default_atol + assert ref.nfev == f.feval + ref.calls = f.calls # reference number of function calls + assert ref.success + assert ref.status == 0 + + # Test `maxlevel` equal to required max level + # We should get all the same results + f.feval, f.calls = 0, 0 + maxlevel = int(ref.maxlevel) + res = _tanhsinh(f, a, b, maxlevel=maxlevel) + res.calls = f.calls + assert res == ref + + # Now reduce the maximum level. We won't meet tolerances. + f.feval, f.calls = 0, 0 + maxlevel -= 1 + assert maxlevel >= 2 # can't compare errors otherwise + res = _tanhsinh(f, a, b, maxlevel=maxlevel) + assert self.error(res.integral, f.ref) < res.error > default_atol + assert res.nfev == f.feval < ref.nfev + assert f.calls == ref.calls - 1 + assert not res.success + assert res.status == eim._ECONVERR + + # `maxfun` is currently not enforced + + # # Test `maxfun` equal to required number of function evaluations + # # We should get all the same results + # f.feval, f.calls = 0, 0 + # maxfun = ref.nfev + # res = _tanhsinh(f, 0, f.b, maxfun = maxfun) + # assert res == ref + # + # # Now reduce `maxfun`. We won't meet tolerances. 
+ # f.feval, f.calls = 0, 0 + # maxfun -= 1 + # res = _tanhsinh(f, 0, f.b, maxfun=maxfun) + # assert self.error(res.integral, f.ref) < res.error > default_atol + # assert res.nfev == f.feval < ref.nfev + # assert f.calls == ref.calls - 1 + # assert not res.success + # assert res.status == 2 + + # Take this result to be the new reference + ref = res + ref.calls = f.calls + + # Test `atol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + atol = np.nextafter(float(ref.error), np.inf) + res = _tanhsinh(f, a, b, rtol=0, atol=atol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + atol = np.nextafter(float(ref.error), -np.inf) + res = _tanhsinh(f, a, b, rtol=0, atol=atol) + assert self.error(res.integral, f.ref) < res.error < atol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status == 0 + + # Test `rtol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + rtol = np.nextafter(float(ref.error/ref.integral), np.inf) + res = _tanhsinh(f, a, b, rtol=rtol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + rtol = np.nextafter(float(ref.error/ref.integral), -np.inf) + res = _tanhsinh(f, a, b, rtol=rtol) + assert self.error(res.integral, f.ref)/f.ref < res.error/res.integral < rtol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success 
+ assert res.status == 0 + + @pytest.mark.skip_xp_backends('torch', reason= + 'https://github.com/scipy/scipy/pull/21149#issuecomment-2330477359', + ) + @pytest.mark.parametrize('rtol', [1e-4, 1e-14]) + def test_log(self, rtol, xp): + # Test equivalence of log-integration and regular integration + test_tols = dict(atol=1e-18, rtol=1e-15) + + # Positive integrand (real log-integrand) + a = xp.asarray(-1., dtype=xp.float64) + b = xp.asarray(2., dtype=xp.float64) + res = _tanhsinh(norm_logpdf, a, b, log=True, rtol=math.log(rtol)) + ref = _tanhsinh(norm_pdf, a, b, rtol=rtol) + xp_assert_close(xp.exp(res.integral), ref.integral, **test_tols) + xp_assert_close(xp.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + # Real integrand (complex log-integrand) + def f(x): + return -norm_logpdf(x)*norm_pdf(x) + + def logf(x): + return xp.log(norm_logpdf(x) + 0j) + norm_logpdf(x) + xp.pi * 1j + + a = xp.asarray(-xp.inf, dtype=xp.float64) + b = xp.asarray(xp.inf, dtype=xp.float64) + res = _tanhsinh(logf, a, b, log=True) + ref = _tanhsinh(f, a, b) + # In gh-19173, we saw `invalid` warnings on one CI platform. + # Silencing `all` because I can't reproduce locally and don't want + # to risk the need to run CI again. 
+ with np.errstate(all='ignore'): + xp_assert_close(xp.exp(res.integral), ref.integral, **test_tols, + check_dtype=False) + xp_assert_close(xp.exp(res.error), ref.error, **test_tols, + check_dtype=False) + assert res.nfev == ref.nfev + + def test_complex(self, xp): + # Test integration of complex integrand + # Finite limits + def f(x): + return xp.exp(1j * x) + + a, b = xp.asarray(0.), xp.asarray(xp.pi/4) + res = _tanhsinh(f, a, b) + ref = math.sqrt(2)/2 + (1-math.sqrt(2)/2)*1j + xp_assert_close(res.integral, xp.asarray(ref)) + + # Infinite limits + def f(x): + return norm_pdf(x) + 1j/2*norm_pdf(x/2) + + a, b = xp.asarray(xp.inf), xp.asarray(-xp.inf) + res = _tanhsinh(f, a, b) + xp_assert_close(res.integral, xp.asarray(-(1+1j))) + + @pytest.mark.parametrize("maxlevel", range(4)) + def test_minlevel(self, maxlevel, xp): + # Verify that minlevel does not change the values at which the + # integrand is evaluated or the integral/error estimates, only the + # number of function calls + + # need `xp.concat`, `xp.atan`, and `xp.sort` + xp_test = array_namespace(xp.asarray(1.)) + + def f(x): + f.calls += 1 + f.feval += xp_size(xp.asarray(x)) + f.x = xp_test.concat((f.x, xp_ravel(x))) + return x**2 * xp_test.atan(x) + + f.feval, f.calls, f.x = 0, 0, xp.asarray([]) + + a = xp.asarray(0, dtype=xp.float64) + b = xp.asarray(1, dtype=xp.float64) + ref = _tanhsinh(f, a, b, minlevel=0, maxlevel=maxlevel) + ref_x = xp_test.sort(f.x) + + for minlevel in range(0, maxlevel + 1): + f.feval, f.calls, f.x = 0, 0, xp.asarray([]) + options = dict(minlevel=minlevel, maxlevel=maxlevel) + res = _tanhsinh(f, a, b, **options) + # Should be very close; all that has changed is the order of values + xp_assert_close(res.integral, ref.integral, rtol=4e-16) + # Difference in absolute errors << magnitude of integral + xp_assert_close(res.error, ref.error, atol=4e-16 * ref.integral) + assert res.nfev == f.feval == f.x.shape[0] + assert f.calls == maxlevel - minlevel + 1 + 1 # 1 validation call + assert 
res.status == ref.status + xp_assert_equal(ref_x, xp_test.sort(f.x)) + + def test_improper_integrals(self, xp): + # Test handling of infinite limits of integration (mixed with finite limits) + def f(x): + x[xp.isinf(x)] = xp.nan + return xp.exp(-x**2) + a = xp.asarray([-xp.inf, 0, -xp.inf, xp.inf, -20, -xp.inf, -20]) + b = xp.asarray([xp.inf, xp.inf, 0, -xp.inf, 20, 20, xp.inf]) + ref = math.sqrt(math.pi) + ref = xp.asarray([ref, ref/2, ref/2, -ref, ref, ref, ref]) + res = _tanhsinh(f, a, b) + xp_assert_close(res.integral, ref) + + @pytest.mark.parametrize("limits", ((0, 3), ([-math.inf, 0], [3, 3]))) + @pytest.mark.parametrize("dtype", ('float32', 'float64')) + def test_dtype(self, limits, dtype, xp): + # Test that dtypes are preserved + dtype = getattr(xp, dtype) + a, b = xp.asarray(limits, dtype=dtype) + + def f(x): + assert x.dtype == dtype + return xp.exp(x) + + rtol = 1e-12 if dtype == xp.float64 else 1e-5 + res = _tanhsinh(f, a, b, rtol=rtol) + assert res.integral.dtype == dtype + assert res.error.dtype == dtype + assert xp.all(res.success) + xp_assert_close(res.integral, xp.exp(b)-xp.exp(a)) + + def test_maxiter_callback(self, xp): + # Test behavior of `maxiter` parameter and `callback` interface + a, b = xp.asarray(-xp.inf), xp.asarray(xp.inf) + def f(x): + return xp.exp(-x*x) + + minlevel, maxlevel = 0, 2 + maxiter = maxlevel - minlevel + 1 + kwargs = dict(minlevel=minlevel, maxlevel=maxlevel, rtol=1e-15) + res = _tanhsinh(f, a, b, **kwargs) + assert not res.success + assert res.maxlevel == maxlevel + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'integral') + assert res.status == 1 + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + + del kwargs['maxlevel'] + res2 = _tanhsinh(f, a, b, **kwargs, callback=callback) + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in 
res.keys(): + if key == 'status': + assert res[key] == -2 + assert res2[key] == -4 + else: + assert res2[key] == callback.res[key] == res[key] + + def test_jumpstart(self, xp): + # The intermediate results at each level i should be the same as the + # final results when jumpstarting at level i; i.e. minlevel=maxlevel=i + a = xp.asarray(-xp.inf, dtype=xp.float64) + b = xp.asarray(xp.inf, dtype=xp.float64) + + def f(x): + return xp.exp(-x*x) + + def callback(res): + callback.integrals.append(xp_copy(res.integral)[()]) + callback.errors.append(xp_copy(res.error)[()]) + callback.integrals = [] + callback.errors = [] + + maxlevel = 4 + _tanhsinh(f, a, b, minlevel=0, maxlevel=maxlevel, callback=callback) + + for i in range(maxlevel + 1): + res = _tanhsinh(f, a, b, minlevel=i, maxlevel=i) + xp_assert_close(callback.integrals[1+i], res.integral, rtol=1e-15) + xp_assert_close(callback.errors[1+i], res.error, rtol=1e-15, atol=1e-16) + + def test_special_cases(self, xp): + # Test edge cases and other special cases + a, b = xp.asarray(0), xp.asarray(1) + xp_test = array_namespace(a, b) # need `xp.isdtype` + + def f(x): + assert xp_test.isdtype(x.dtype, "real floating") + return x + + res = _tanhsinh(f, a, b) + assert res.success + xp_assert_close(res.integral, xp.asarray(0.5)) + + # Test levels 0 and 1; error is NaN + res = _tanhsinh(f, a, b, maxlevel=0) + assert res.integral > 0 + xp_assert_equal(res.error, xp.asarray(xp.nan)) + res = _tanhsinh(f, a, b, maxlevel=1) + assert res.integral > 0 + xp_assert_equal(res.error, xp.asarray(xp.nan)) + + # Test equal left and right integration limits + res = _tanhsinh(f, b, b) + assert res.success + assert res.maxlevel == -1 + xp_assert_close(res.integral, xp.asarray(0.)) + + # Test scalar `args` (not in tuple) + def f(x, c): + return x**c + + res = _tanhsinh(f, a, b, args=29) + xp_assert_close(res.integral, xp.asarray(1/30)) + + # Test NaNs + a = xp.asarray([xp.nan, 0, 0, 0]) + b = xp.asarray([1, xp.nan, 1, 1]) + c = xp.asarray([1, 1, 
xp.nan, 1]) + res = _tanhsinh(f, a, b, args=(c,)) + xp_assert_close(res.integral, xp.asarray([xp.nan, xp.nan, xp.nan, 0.5])) + xp_assert_equal(res.error[:3], xp.full((3,), xp.nan)) + xp_assert_equal(res.status, xp.asarray([-3, -3, -3, 0], dtype=xp.int32)) + xp_assert_equal(res.success, xp.asarray([False, False, False, True])) + xp_assert_equal(res.nfev[:3], xp.full((3,), 1, dtype=xp.int32)) + + # Test complex integral followed by real integral + # Previously, h0 was of the result dtype. If the `dtype` were complex, + # this could lead to complex cached abscissae/weights. If these get + # cast to real dtype for a subsequent real integral, we would get a + # ComplexWarning. Check that this is avoided. + _pair_cache.xjc = xp.empty(0) + _pair_cache.wj = xp.empty(0) + _pair_cache.indices = [0] + _pair_cache.h0 = None + a, b = xp.asarray(0), xp.asarray(1) + res = _tanhsinh(lambda x: xp.asarray(x*1j), a, b) + xp_assert_close(res.integral, xp.asarray(0.5*1j)) + res = _tanhsinh(lambda x: x, a, b) + xp_assert_close(res.integral, xp.asarray(0.5)) + + # Test zero-size + shape = (0, 3) + res = _tanhsinh(lambda x: x, xp.asarray(0), xp.zeros(shape)) + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + assert res[attr].shape == shape + + @pytest.mark.skip_xp_backends(np_only=True) + def test_compress_nodes_weights_gh21496(self, xp): + # See discussion in: + # https://github.com/scipy/scipy/pull/21496#discussion_r1878681049 + # This would cause "ValueError: attempt to get argmax of an empty sequence" + # Check that this has been resolved. 
+ x = np.full(65, 3) + x[-1] = 1000 + _tanhsinh(np.sin, 1, x) + + +@array_api_compatible +@pytest.mark.usefixtures("skip_xp_backends") +@pytest.mark.skip_xp_backends('array_api_strict', reason='No fancy indexing.') +@pytest.mark.skip_xp_backends('jax.numpy', reason='No mutation.') +class TestNSum: + rng = np.random.default_rng(5895448232066142650) + p = rng.uniform(1, 10, size=10).tolist() + + def f1(self, k): + # Integers are never passed to `f1`; if they were, we'd get + # integer to negative integer power error + return k**(-2) + + f1.ref = np.pi**2/6 + f1.a = 1 + f1.b = np.inf + f1.args = tuple() + + def f2(self, k, p): + return 1 / k**p + + f2.ref = special.zeta(p, 1) + f2.a = 1. + f2.b = np.inf + f2.args = (p,) + + def f3(self, k, p): + return 1 / k**p + + f3.a = 1 + f3.b = rng.integers(5, 15, size=(3, 1)) + f3.ref = _gen_harmonic_gt1(f3.b, p) + f3.args = (p,) + + def test_input_validation(self, xp): + f = self.f1 + a, b = xp.asarray(f.a), xp.asarray(f.b) + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + nsum(42, a, b) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + nsum(f, a, b, log=2) + + message = '...must be real numbers.' 
+ with pytest.raises(ValueError, match=message): + nsum(f, xp.asarray(1+1j), b) + with pytest.raises(ValueError, match=message): + nsum(f, a, xp.asarray(1+1j)) + with pytest.raises(ValueError, match=message): + nsum(f, a, b, step=xp.asarray(1+1j)) + with pytest.raises(ValueError, match=message): + nsum(f, a, b, tolerances=dict(atol='ekki')) + with pytest.raises(ValueError, match=message): + nsum(f, a, b, tolerances=dict(rtol=pytest)) + + with np.errstate(all='ignore'): + res = nsum(f, xp.asarray([np.nan, np.inf]), xp.asarray(1.)) + assert xp.all((res.status == -1) & xp.isnan(res.sum) + & xp.isnan(res.error) & ~res.success & res.nfev == 1) + res = nsum(f, xp.asarray(10.), xp.asarray([np.nan, 1])) + assert xp.all((res.status == -1) & xp.isnan(res.sum) + & xp.isnan(res.error) & ~res.success & res.nfev == 1) + res = nsum(f, xp.asarray(1.), xp.asarray(10.), + step=xp.asarray([xp.nan, -xp.inf, xp.inf, -1, 0])) + assert xp.all((res.status == -1) & xp.isnan(res.sum) + & xp.isnan(res.error) & ~res.success & res.nfev == 1) + + message = '...must be non-negative and finite.' + with pytest.raises(ValueError, match=message): + nsum(f, a, b, tolerances=dict(rtol=-1)) + with pytest.raises(ValueError, match=message): + nsum(f, a, b, tolerances=dict(atol=np.inf)) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + nsum(f, a, b, tolerances=dict(rtol=np.inf), log=True) + with pytest.raises(ValueError, match=message): + nsum(f, a, b, tolerances=dict(atol=np.inf), log=True) + + message = '...must be a non-negative integer.' 
+ with pytest.raises(ValueError, match=message): + nsum(f, a, b, maxterms=3.5) + with pytest.raises(ValueError, match=message): + nsum(f, a, b, maxterms=-2) + + @pytest.mark.parametrize('f_number', range(1, 4)) + def test_basic(self, f_number, xp): + dtype = xp.asarray(1.).dtype + f = getattr(self, f"f{f_number}") + a, b = xp.asarray(f.a), xp.asarray(f.b), + args = tuple(xp.asarray(arg) for arg in f.args) + ref = xp.asarray(f.ref, dtype=dtype) + res = nsum(f, a, b, args=args) + xp_assert_close(res.sum, ref) + xp_assert_equal(res.status, xp.zeros(ref.shape, dtype=xp.int32)) + xp_test = array_namespace(a) # CuPy doesn't have `bool` + xp_assert_equal(res.success, xp.ones(ref.shape, dtype=xp_test.bool)) + + with np.errstate(divide='ignore'): + logres = nsum(lambda *args: xp.log(f(*args)), + a, b, log=True, args=args) + xp_assert_close(xp.exp(logres.sum), res.sum) + xp_assert_close(xp.exp(logres.error), res.error, atol=1e-15) + xp_assert_equal(logres.status, res.status) + xp_assert_equal(logres.success, res.success) + + @pytest.mark.parametrize('maxterms', [0, 1, 10, 20, 100]) + def test_integral(self, maxterms, xp): + # test precise behavior of integral approximation + f = self.f1 + + def logf(x): + return -2*xp.log(x) + + def F(x): + return -1 / x + + a = xp.asarray([1, 5], dtype=xp.float64)[:, xp.newaxis] + b = xp.asarray([20, 100, xp.inf], dtype=xp.float64)[:, xp.newaxis, xp.newaxis] + step = xp.asarray([0.5, 1, 2], dtype=xp.float64).reshape((-1, 1, 1, 1)) + nsteps = xp.floor((b - a)/step) + b_original = b + b = a + nsteps*step + + k = a + maxterms*step + # partial sum + direct = xp.sum(f(a + xp.arange(maxterms)*step), axis=-1, keepdims=True) + integral = (F(b) - F(k))/step # integral approximation of remainder + low = direct + integral + f(b) # theoretical lower bound + high = direct + integral + f(k) # theoretical upper bound + ref_sum = (low + high)/2 # nsum uses average of the two + ref_err = (high - low)/2 # error (assuming perfect quadrature) + + # correct 
reference values where number of terms < maxterms + xp_test = array_namespace(a) # torch needs broadcast_arrays + a, b, step = xp_test.broadcast_arrays(a, b, step) + for i in np.ndindex(a.shape): + ai, bi, stepi = float(a[i]), float(b[i]), float(step[i]) + if (bi - ai)/stepi + 1 <= maxterms: + direct = xp.sum(f(xp.arange(ai, bi+stepi, stepi, dtype=xp.float64))) + ref_sum[i] = direct + ref_err[i] = direct * xp.finfo(direct.dtype).eps + + rtol = 1e-12 + res = nsum(f, a, b_original, step=step, maxterms=maxterms, + tolerances=dict(rtol=rtol)) + xp_assert_close(res.sum, ref_sum, rtol=10*rtol) + xp_assert_close(res.error, ref_err, rtol=100*rtol) + + i = ((b_original - a)/step + 1 <= maxterms) + xp_assert_close(res.sum[i], ref_sum[i], rtol=1e-15) + xp_assert_close(res.error[i], ref_err[i], rtol=1e-15) + + logres = nsum(logf, a, b_original, step=step, log=True, + tolerances=dict(rtol=math.log(rtol)), maxterms=maxterms) + xp_assert_close(xp.exp(logres.sum), res.sum) + xp_assert_close(xp.exp(logres.error), res.error) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape, xp): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.integers(1, 10, size=shape) + # when the sum can be computed directly or `maxterms` is large enough + # to meet `atol`, there are slight differences (for good reason) + # between vectorized call and looping. 
+ b = np.inf + p = rng.random(shape) + 1 + n = math.prod(shape) + + def f(x, p): + f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1] + return 1 / x ** p + + f.feval = 0 + + @np.vectorize + def nsum_single(a, b, p, maxterms): + return nsum(lambda x: 1 / x**p, a, b, maxterms=maxterms) + + res = nsum(f, xp.asarray(a), xp.asarray(b), maxterms=1000, + args=(xp.asarray(p),)) + refs = nsum_single(a, b, p, maxterms=1000).ravel() + + attrs = ['sum', 'error', 'success', 'status', 'nfev'] + for attr in attrs: + ref_attr = [xp.asarray(getattr(ref, attr)) for ref in refs] + res_attr = getattr(res, attr) + xp_assert_close(xp_ravel(res_attr), xp.asarray(ref_attr), rtol=1e-15) + assert res_attr.shape == shape + + xp_test = array_namespace(xp.asarray(1.)) + assert xp_test.isdtype(res.success.dtype, 'bool') + assert xp_test.isdtype(res.status.dtype, 'integral') + assert xp_test.isdtype(res.nfev.dtype, 'integral') + if is_numpy(xp): # other libraries might have different number + assert int(xp.max(res.nfev)) == f.feval + + def test_status(self, xp): + f = self.f2 + + p = [2, 2, 0.9, 1.1, 2, 2] + a = xp.asarray([0, 0, 1, 1, 1, np.nan], dtype=xp.float64) + b = xp.asarray([10, np.inf, np.inf, np.inf, np.inf, np.inf], dtype=xp.float64) + ref = special.zeta(p, 1) + p = xp.asarray(p, dtype=xp.float64) + + with np.errstate(divide='ignore'): # intentionally dividing by zero + res = nsum(f, a, b, args=(p,)) + + ref_success = xp.asarray([False, False, False, False, True, False]) + ref_status = xp.asarray([-3, -3, -2, -4, 0, -1], dtype=xp.int32) + xp_assert_equal(res.success, ref_success) + xp_assert_equal(res.status, ref_status) + xp_assert_close(res.sum[res.success], xp.asarray(ref)[res.success]) + + def test_nfev(self, xp): + def f(x): + f.nfev += xp_size(x) + return 1 / x**2 + + f.nfev = 0 + res = nsum(f, xp.asarray(1), xp.asarray(10)) + assert res.nfev == f.nfev + + f.nfev = 0 + res = nsum(f, xp.asarray(1), xp.asarray(xp.inf), tolerances=dict(atol=1e-6)) + assert res.nfev == 
f.nfev + + def test_inclusive(self, xp): + # There was an edge case off-by one bug when `_direct` was called with + # `inclusive=True`. Check that this is resolved. + a = xp.asarray([1, 4]) + b = xp.asarray(xp.inf) + res = nsum(lambda k: 1 / k ** 2, a, b, + maxterms=500, tolerances=dict(atol=0.1)) + ref = nsum(lambda k: 1 / k ** 2, a, b) + assert xp.all(res.sum > (ref.sum - res.error)) + assert xp.all(res.sum < (ref.sum + res.error)) + + @pytest.mark.parametrize('log', [True, False]) + def test_infinite_bounds(self, log, xp): + a = xp.asarray([1, -np.inf, -np.inf]) + b = xp.asarray([np.inf, -1, np.inf]) + c = xp.asarray([1, 2, 3]) + + def f(x, a): + return (xp.log(xp.tanh(a / 2)) - a*xp.abs(x) if log + else xp.tanh(a/2) * xp.exp(-a*xp.abs(x))) + + res = nsum(f, a, b, args=(c,), log=log) + ref = xp.asarray([stats.dlaplace.sf(0, 1), stats.dlaplace.sf(0, 2), 1]) + ref = xp.log(ref) if log else ref + atol = (1e-10 if a.dtype==xp.float64 else 1e-5) if log else 0 + xp_assert_close(res.sum, xp.asarray(ref, dtype=a.dtype), atol=atol) + + # # Make sure the sign of `x` passed into `f` is correct. + def f(x, c): + return -3*xp.log(c*x) if log else 1 / (c*x)**3 + + a = xp.asarray([1, -np.inf]) + b = xp.asarray([np.inf, -1]) + arg = xp.asarray([1, -1]) + res = nsum(f, a, b, args=(arg,), log=log) + ref = np.log(special.zeta(3)) if log else special.zeta(3) + xp_assert_close(res.sum, xp.full(a.shape, ref, dtype=a.dtype)) + + def test_decreasing_check(self, xp): + # Test accuracy when we start sum on an uphill slope. + # Without the decreasing check, the terms would look small enough to + # use the integral approximation. Because the function is not decreasing, + # the error is not bounded by the magnitude of the last term of the + # partial sum. In this case, the error would be ~1e-4, causing the test + # to fail. 
+ def f(x): + return xp.exp(-x ** 2) + + a, b = xp.asarray(-25, dtype=xp.float64), xp.asarray(np.inf, dtype=xp.float64) + res = nsum(f, a, b) + + # Reference computed with mpmath: + # from mpmath import mp + # mp.dps = 50 + # def fmp(x): return mp.exp(-x**2) + # ref = mp.nsum(fmp, (-25, 0)) + mp.nsum(fmp, (1, mp.inf)) + ref = xp.asarray(1.772637204826652, dtype=xp.float64) + + xp_assert_close(res.sum, ref, rtol=1e-15) + + def test_special_case(self, xp): + # test equal lower/upper limit + f = self.f1 + a = b = xp.asarray(2) + res = nsum(f, a, b) + xp_assert_equal(res.sum, xp.asarray(f(2))) + + # Test scalar `args` (not in tuple) + res = nsum(self.f2, xp.asarray(1), xp.asarray(np.inf), args=xp.asarray(2)) + xp_assert_close(res.sum, xp.asarray(self.f1.ref)) # f1.ref is correct w/ args=2 + + # Test 0 size input + a = xp.empty((3, 1, 1)) # arbitrary broadcastable shapes + b = xp.empty((0, 1)) # could use Hypothesis + p = xp.empty(4) # but it's overkill + shape = np.broadcast_shapes(a.shape, b.shape, p.shape) + res = nsum(self.f2, a, b, args=(p,)) + assert res.sum.shape == shape + assert res.status.shape == shape + assert res.nfev.shape == shape + + # Test maxterms=0 + def f(x): + with np.errstate(divide='ignore'): + return 1 / x + + res = nsum(f, xp.asarray(0), xp.asarray(10), maxterms=0) + assert xp.isnan(res.sum) + assert xp.isnan(res.error) + assert res.status == -2 + + res = nsum(f, xp.asarray(0), xp.asarray(10), maxterms=1) + assert xp.isnan(res.sum) + assert xp.isnan(res.error) + assert res.status == -3 + + # Test NaNs + # should skip both direct and integral methods if there are NaNs + a = xp.asarray([xp.nan, 1, 1, 1]) + b = xp.asarray([xp.inf, xp.nan, xp.inf, xp.inf]) + p = xp.asarray([2, 2, xp.nan, 2]) + res = nsum(self.f2, a, b, args=(p,)) + xp_assert_close(res.sum, xp.asarray([xp.nan, xp.nan, xp.nan, self.f1.ref])) + xp_assert_close(res.error[:3], xp.full((3,), xp.nan)) + xp_assert_equal(res.status, xp.asarray([-1, -1, -3, 0], dtype=xp.int32)) + 
xp_assert_equal(res.success, xp.asarray([False, False, False, True])) + # Ideally res.nfev[2] would be 1, but `tanhsinh` has some function evals + xp_assert_equal(res.nfev[:2], xp.full((2,), 1, dtype=xp.int32)) + + @pytest.mark.parametrize('dtype', ['float32', 'float64']) + def test_dtype(self, dtype, xp): + dtype = getattr(xp, dtype) + + def f(k): + assert k.dtype == dtype + return 1 / k ** xp.asarray(2, dtype=dtype) + + a = xp.asarray(1, dtype=dtype) + b = xp.asarray([10, xp.inf], dtype=dtype) + res = nsum(f, a, b) + assert res.sum.dtype == dtype + assert res.error.dtype == dtype + + rtol = 1e-12 if dtype == xp.float64 else 1e-6 + ref = _gen_harmonic_gt1(np.asarray([10, xp.inf]), 2) + xp_assert_close(res.sum, xp.asarray(ref, dtype=dtype), rtol=rtol) + + @pytest.mark.parametrize('case', [(10, 100), (100, 10)]) + def test_nondivisible_interval(self, case, xp): + # When the limits of the sum are such that (b - a)/step + # is not exactly integral, check that only floor((b - a)/step) + # terms are included. + n, maxterms = case + + def f(k): + return 1 / k ** 2 + + a = np.e + step = 1 / 3 + b0 = a + n * step + i = np.arange(-2, 3) + b = b0 + i * np.spacing(b0) + ns = np.floor((b - a) / step) + assert len(set(ns)) == 2 + + a, b = xp.asarray(a, dtype=xp.float64), xp.asarray(b, dtype=xp.float64) + step, ns = xp.asarray(step, dtype=xp.float64), xp.asarray(ns, dtype=xp.float64) + res = nsum(f, a, b, step=step, maxterms=maxterms) + xp_assert_equal(xp.diff(ns) > 0, xp.diff(res.sum) > 0) + xp_assert_close(res.sum[-1], res.sum[0] + f(b0)) + + @pytest.mark.skip_xp_backends(np_only=True, reason='Needs beta function.') + def test_logser_kurtosis_gh20648(self, xp): + # Some functions return NaN at infinity rather than 0 like they should. + # Check that this is accounted for. 
+ ref = stats.yulesimon.moment(4, 5) + def f(x): + return stats.yulesimon._pmf(x, 5) * x**4 + + with np.errstate(invalid='ignore'): + assert np.isnan(f(np.inf)) + + res = nsum(f, 1, np.inf) + assert_allclose(res.sum, ref) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/vode.py b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/vode.py new file mode 100644 index 0000000000000000000000000000000000000000..f92927901084ce33cdeb006057d85dd501b13aae --- /dev/null +++ b/evalkit_eagle/lib/python3.10/site-packages/scipy/integrate/vode.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="vode", + private_modules=["_vode"], all=__all__, + attribute=name) diff --git a/evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee6e68ae001ed01910c2b38adbb0d431d1428b20 Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc differ