repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
scipy
|
scipy-main/tools/wheels/check_license.py
|
#!/usr/bin/env python
"""
check_license.py [MODULE]
Check the presence of a LICENSE.txt in the installed module directory,
and that it appears to contain text prevalent for a SciPy binary
distribution.
"""
import os
import sys
import io
import re
import argparse
def check_text(text):
    """Return a truthy value when *text* looks like a SciPy binary
    distribution license: it must contain a copyright notice and the
    bundled-software preamble."""
    has_copyright = "Copyright (c)" in text
    bundle_match = re.search(
        r"This binary distribution of \w+ also bundles the following software",
        text,
        re.IGNORECASE,
    )
    # Same truthiness contract as before: False, None, or a Match object.
    return has_copyright and bundle_match
def main():
    """Locate MODULE (default: scipy) and validate its bundled LICENSE.txt.

    Exits with status 0 on success, 1 when the license text is missing the
    expected fragments.
    """
    p = argparse.ArgumentParser(usage=__doc__.rstrip())
    p.add_argument("module", nargs="?", default="scipy")
    args = p.parse_args()
    # Drop '' from sys.path
    # (ensures the *installed* module is imported, not a checkout in cwd)
    sys.path.pop(0)
    # Find module path
    __import__(args.module)
    mod = sys.modules[args.module]
    # Check license text
    license_txt = os.path.join(os.path.dirname(mod.__file__), "LICENSE.txt")
    with io.open(license_txt, "r", encoding="utf-8") as f:
        text = f.read()
    ok = check_text(text)
    if not ok:
        print(
            "ERROR: License text {} does not contain expected "
            "text fragments\n".format(license_txt)
        )
        print(text)
        sys.exit(1)
    sys.exit(0)


if __name__ == "__main__":
    main()
| 1,227
| 20.54386
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/test_functions.py
|
import time
import numpy as np
from numpy import sin, cos, pi, exp, sqrt, abs
from scipy.optimize import rosen
class SimpleQuadratic:
    """Convex quadratic f(x) = x.x with analytic gradient and Hessian."""

    def fun(self, x):
        # Sum of squares of the components.
        return np.dot(x, x)

    def der(self, x):
        # Gradient: 2x.
        return x * 2.

    def hess(self, x):
        # Constant Hessian: 2I.
        return np.eye(x.size) * 2.
class AsymmetricQuadratic:
    """Quadratic f(x) = x.x + x[0]; minimum shifted off the origin."""

    def fun(self, x):
        quadratic = np.dot(x, x)
        return quadratic + x[0]

    def der(self, x):
        grad = x * 2.
        # The linear term contributes 1 to the first component only.
        grad[0] = grad[0] + 1
        return grad

    def hess(self, x):
        return np.eye(x.size) * 2.
class SlowRosen:
    """Rosenbrock objective with an artificial 40 microsecond delay per call,
    to emulate an expensive objective function."""

    def fun(self, x):
        time.sleep(40e-6)
        return rosen(x)
class LJ:
    """Lennard-Jones cluster potential.

    E = sum over atom pairs (i, j) of V(r_ij), where

        V(r) = 4 * eps * ((sig / r)**12 - (sig / r)**6)

    and r_ij is the Euclidean distance between atoms i and j.

    Notes
    -----
    The O(natoms**2) double loop in pure Python makes evaluation
    deliberately slow; a compiled implementation would be much faster.
    """

    def __init__(self, eps=1.0, sig=1.0):
        self.sig = sig
        self.eps = eps

    def vij(self, r):
        # Pair energy at separation r.
        return 4. * self.eps * ((self.sig / r)**12 - (self.sig / r)**6)

    def dvij(self, r):
        # dV/dr at separation r.
        term7 = 6. / self.sig * (self.sig / r)**7
        term13 = -12. / self.sig * (self.sig / r)**13
        return 4. * self.eps * (term7 + term13)

    def fun(self, coords):
        n = coords.size // 3
        pos = np.reshape(coords, [n, 3])
        total = 0.
        for i in range(n):
            for j in range(i + 1, n):
                dr = pos[j, :] - pos[i, :]
                total += self.vij(np.linalg.norm(dr))
        return total

    def der(self, coords):
        n = coords.size // 3
        pos = np.reshape(coords, [n, 3])
        total = 0.
        grad = np.zeros([n, 3])
        for i in range(n):
            for j in range(i + 1, n):
                dr = pos[j, :] - pos[i, :]
                r = np.linalg.norm(dr)
                total += self.vij(r)
                g = self.dvij(r)
                # Equal and opposite forces along the pair direction.
                grad[i, :] += -g * dr / r
                grad[j, :] += g * dr / r
        return grad.reshape([n * 3])

    def get_random_configuration(self):
        # NOTE: relies on `natoms` supplied by subclasses (LJ13..LJ38).
        sample = np.random.uniform(-1, 1, [3 * self.natoms])
        return sample * float(self.natoms)**(1. / 3)
class LJ38(LJ):
    natoms = 38              # cluster size
    target_E = -173.928427   # target energy — presumably the putative global minimum; confirm


class LJ30(LJ):
    natoms = 30
    target_E = -128.286571


class LJ20(LJ):
    natoms = 20
    target_E = -77.177043


class LJ13(LJ):
    natoms = 13
    target_E = -44.326801
class Booth:
    """Booth function; global minimum 0 at (1, 3)."""

    target_E = 0.
    solution = np.array([1., 3.])
    xmin = np.array([-10., -10.])
    xmax = np.array([10., 10.])

    def fun(self, coords):
        u, v = coords
        first = u + 2. * v - 7.
        second = 2. * u + v - 5.
        return first**2 + second**2

    def der(self, coords):
        u, v = coords
        first = u + 2. * v - 7.
        second = 2. * u + v - 5.
        return np.array([2. * first + 4. * second,
                         4. * first + 2. * second])
class Beale:
    """Beale function; global minimum 0 at (3, 0.5)."""

    target_E = 0.
    solution = np.array([3., 0.5])
    xmin = np.array([-4.5, -4.5])
    xmax = np.array([4.5, 4.5])

    def fun(self, coords):
        u, v = coords
        r1 = 1.5 - u + u * v
        r2 = 2.25 - u + u * v**2
        r3 = 2.625 - u + u * v**3
        return r1**2 + r2**2 + r3**2

    def der(self, coords):
        u, v = coords
        # Residuals shared between the two partial derivatives.
        r1 = 1.5 - u + u * v
        r2 = 2.25 - u + u * v**2
        r3 = 2.625 - u + u * v**3
        dfdu = (2. * r1 * (-1. + v) +
                2. * r2 * (-1. + v**2) +
                2. * r3 * (-1. + v**3))
        dfdv = (2. * r1 * (u) +
                2. * r2 * (2. * v * u) +
                2. * r3 * (3. * u * v**2))
        return np.array([dfdu, dfdv])
"""
Global Test functions for minimizers.
HolderTable, Ackey and Levi have many competing local minima and are suited
for global minimizers such as basinhopping or differential_evolution.
(https://en.wikipedia.org/wiki/Test_functions_for_optimization)
See also https://mpra.ub.uni-muenchen.de/2718/1/MPRA_paper_2718.pdf
"""
class HolderTable:
    """Holder table function: many competing local minima; global minima of
    -19.2085 at (+-8.05502, +-9.66459)."""
    target_E = -19.2085
    solution = [8.05502, 9.66459]
    xmin = np.array([-10, -10])
    xmax = np.array([10, 10])
    # Tuning parameters — presumably for basinhopping; confirm against the
    # benchmark driver that consumes these classes.
    stepsize = 2.
    temperature = 2.

    def fun(self, x):
        # -|sin(x) * cos(y) * exp(|1 - r/pi|)| with r = sqrt(x**2 + y**2)
        return - abs(sin(x[0]) * cos(x[1]) * exp(abs(1. - sqrt(x[0]**2 +
                                                               x[1]**2) / pi)))

    def dabs(self, x):
        """derivative of absolute value"""
        if x < 0:
            return -1.
        elif x > 0:
            return 1.
        else:
            return 0.

    # commented out because it causes FloatingPointError in basinhopping
    # def der(self, x):
    #     R = sqrt(x[0]**2 + x[1]**2)
    #     g = 1. - R / pi
    #     f = sin(x[0]) * cos(x[1]) * exp(abs(g))
    #     E = -abs(f)
    #
    #     dRdx = x[0] / R
    #     dgdx = - dRdx / pi
    #     dfdx = cos(x[0]) * cos(x[1]) * exp(abs(g)) + f * self.dabs(g) * dgdx
    #     dEdx = - self.dabs(f) * dfdx
    #
    #     dRdy = x[1] / R
    #     dgdy = - dRdy / pi
    #     dfdy = -sin(x[0]) * sin(x[1]) * exp(abs(g)) + f * self.dabs(g) * dgdy
    #     dEdy = - self.dabs(f) * dfdy
    #     return np.array([dEdx, dEdy])
class Ackley:
    """Ackley function (2-D); global minimum 0 at the origin.

    Note: the function is not smooth at the origin, so a gradient-based
    minimizer will never fully converge there; ``der`` divides by zero at
    exactly (0, 0).
    """
    target_E = 0.
    solution = [0., 0.]
    xmin = np.array([-5, -5])
    xmax = np.array([5, 5])

    def fun(self, x):
        E = (-20. * exp(-0.2 * sqrt(0.5 * (x[0]**2 + x[1]**2))) + 20. + np.e -
             exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1]))))
        return E

    def der(self, x):
        # Gradient of `fun`.  The radius must be the *scaled* one used in
        # `fun`, R = sqrt(0.5*(x^2+y^2)); the previous version used the
        # unscaled radius and an extra factor of 2, making the analytic
        # gradient inconsistent with the objective.
        R = sqrt(0.5 * (x[0]**2 + x[1]**2))
        term1 = -20. * exp(-0.2 * R)
        term2 = -exp(0.5 * (cos(2. * pi * x[0]) + cos(2. * pi * x[1])))
        # d/dx of term1: term1 * (-0.2) * dR/dx, with dR/dx = 0.5*x/R.
        deriv1 = term1 * (-0.2 * 0.5 / R)
        dfdx = deriv1 * x[0] - term2 * pi * sin(2. * pi * x[0])
        dfdy = deriv1 * x[1] - term2 * pi * sin(2. * pi * x[1])
        return np.array([dfdx, dfdy])
class Levi:
    """Levi function N.13; global minimum 0 at (1, 1)."""

    target_E = 0.
    solution = [1., 1.]
    xmin = np.array([-10, -10])
    xmax = np.array([10, 10])

    def fun(self, x):
        u, v = x[0], x[1]
        oscillation = sin(3. * pi * u)**2
        cross_term = (u - 1.)**2 * (1. + sin(3 * pi * v)**2)
        tail = (v - 1.)**2 * (1. + sin(2 * pi * v)**2)
        return oscillation + cross_term + tail

    def der(self, x):
        u, v = x[0], x[1]
        dfdu = (2. * 3. * pi *
                cos(3. * pi * u) * sin(3. * pi * u) +
                2. * (u - 1.) * (1. + sin(3 * pi * v)**2))
        dfdv = ((u - 1.)**2 * 2. * 3. * pi * cos(3. * pi * v) * sin(3. *
                pi * v) + 2. * (v - 1.) *
                (1. + sin(2 * pi * v)**2) + (v - 1.)**2 *
                2. * 2. * pi * cos(2. * pi * v) * sin(2. * pi * v))
        return np.array([dfdu, dfdv])
class EggHolder:
    """Egg-holder function; minimum of about -959.6407 at (512, 404.2319)."""

    target_E = -959.6407
    solution = [512, 404.2319]
    xmin = np.array([-512., -512])
    xmax = np.array([512., 512])

    def fun(self, x):
        u, v = x[0], x[1]
        first = -(v + 47) * np.sin(np.sqrt(abs(v + u/2. + 47)))
        second = -u * np.sin(np.sqrt(abs(u - (v + 47))))
        return first + second
class CrossInTray:
    """Cross-in-tray function; four global minima of -2.06261 at
    (+-1.34941, +-1.34941)."""

    target_E = -2.06261
    solution = [1.34941, -1.34941]
    xmin = np.array([-10., -10])
    xmax = np.array([10., 10])

    def fun(self, x):
        u, v = x[0], x[1]
        attenuation = abs(100 - sqrt(u**2 + v**2) / pi)
        core = abs(sin(u) * sin(v) * exp(attenuation)) + 1.
        return -0.0001 * np.power(core, 0.1)
class Schaffer2:
    """Schaffer function N.2; global minimum 0 at the origin."""

    target_E = 0
    solution = [0., 0.]
    xmin = np.array([-100., -100])
    xmax = np.array([100., 100])

    def fun(self, x):
        u2, v2 = x[0]**2, x[1]**2
        numerator = np.sin(u2 - v2)**2 - 0.5
        denominator = np.power(1 + 0.001 * (u2 + v2), 2)
        return 0.5 + numerator / denominator
class Schaffer4:
    """Schaffer function N.4; minimum 0.292579 at (0, +-1.25313...)."""

    target_E = 0.292579
    solution = [0, 1.253131828927371]
    xmin = np.array([-100., -100])
    xmax = np.array([100., 100])

    def fun(self, x):
        gap = abs(x[0]**2 - x[1]**2)
        numerator = cos(sin(gap))**2 - 0.5
        denominator = (1 + 0.001 * (x[0]**2 + x[1]**2))**2
        return 0.5 + numerator / denominator
| 8,342
| 25.235849
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/optimize_lap.py
|
from concurrent.futures import ThreadPoolExecutor, wait
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
def random_uniform(shape):
    """Dense cost matrix with entries uniform on [-20, 20)."""
    low, high = -20, 20
    return np.random.uniform(low, high, shape)
def random_logarithmic(shape):
    """Positive cost matrix whose entries span 40 orders of magnitude."""
    exponents = np.random.uniform(-20, 20, shape)
    return 10**exponents
def random_integer(shape):
    """Integer costs drawn uniformly from [-1000, 1000)."""
    low, high = -1000, 1000
    return np.random.randint(low, high, shape)
def random_binary(shape):
    """0/1 cost matrix."""
    # randint's upper bound is exclusive, so this draws from {0, 1}.
    return np.random.randint(0, 2, shape)
def random_spatial(shape):
    """Squared-Euclidean costs between two random planar point sets."""
    sources = np.random.uniform(-1, 1, size=(shape[0], 2))
    targets = np.random.uniform(-1, 1, size=(shape[1], 2))
    return cdist(sources, targets, 'sqeuclidean')
class LinearAssignment(Benchmark):
    """Benchmark linear_sum_assignment over square and rectangular cost
    matrices drawn from several distributions."""

    sizes = range(100, 401, 100)
    # Square, wide, and tall problem shapes.
    shapes = ([(i, i) for i in sizes]
              + [(i, 2 * i) for i in sizes]
              + [(2 * i, i) for i in sizes])
    cost_types = ['uniform', 'spatial', 'logarithmic', 'integer', 'binary']
    param_names = ['shape', 'cost_type']
    params = [shapes, cost_types]

    def setup(self, shape, cost_type):
        # Map the parameter name to its generator, then build the matrix.
        generators = {'uniform': random_uniform,
                      'spatial': random_spatial,
                      'logarithmic': random_logarithmic,
                      'integer': random_integer,
                      'binary': random_binary}
        self.cost_matrix = generators[cost_type](shape)

    def time_evaluation(self, *args):
        linear_sum_assignment(self.cost_matrix)
class ParallelLinearAssignment(Benchmark):
    # Fixed problem size; only the worker-thread count is varied.
    shape = (100, 100)
    param_names = ['threads']
    params = [[1, 2, 4]]

    def setup(self, threads):
        # 20 independent cost matrices; `threads` is unused here but asv
        # passes every parameter to setup.
        self.cost_matrices = [random_uniform(self.shape) for _ in range(20)]

    def time_evaluation(self, threads):
        # Fan all problems out to the pool and block until every future
        # completes.
        with ThreadPoolExecutor(max_workers=threads) as pool:
            wait({pool.submit(linear_sum_assignment, cost_matrix)
                  for cost_matrix in self.cost_matrices})
| 1,956
| 27.362319
| 76
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/cython_special.py
|
import re
import numpy as np
from scipy import special
from .common import with_attributes, safe_import
with safe_import():
from scipy.special import cython_special
# Arguments used to benchmark each cython_special kernel.  The suffix after
# the function name encodes the argument signature of the corresponding
# _bench_<name>_{py,cy} helpers — presumably d: double, D: complex double,
# l: long, matching the `_[dDl]+` regex stripped in CythonSpecial.setup.
FUNC_ARGS = {
    'airy_d': (1,),
    'airy_D': (1,),
    'beta_dd': (0.25, 0.75),
    'erf_d': (1,),
    'erf_D': (1+1j,),
    'exprel_d': (1e-6,),
    'gamma_d': (100,),
    'gamma_D': (100+100j,),
    'jv_dd': (1, 1),
    'jv_dD': (1, (1+1j)),
    'loggamma_D': (20,),
    'logit_d': (0.5,),
    'psi_d': (1,),
    'psi_D': (1,),
}
class _CythonSpecialMeta(type):
    """
    Add time_* benchmarks corresponding to cython_special._bench_*_cy
    """
    def __new__(cls, cls_name, bases, dct):
        # Shared asv axes: problem size N and which API implementation to call.
        params = [(10, 100, 1000), ('python', 'numpy', 'cython')]
        param_names = ['N', 'api']

        def get_time_func(name, args):
            # Each generated method is additionally parameterized by its own
            # (name, args) so asv reports one benchmark per kernel.
            @with_attributes(params=[(name,), (args,)] + params,
                             param_names=['name', 'argument'] + param_names)
            def func(self, name, args, N, api):
                if api == 'python':
                    self.py_func(N, *args)
                elif api == 'numpy':
                    # The numpy path is vectorized: one call on the
                    # N-element arrays prepared in setup, so N is implicit.
                    self.np_func(*self.obj)
                else:
                    self.cy_func(N, *args)
            func.__name__ = 'time_' + name
            return func

        # Inject one time_<name> method per benchmarked kernel.
        for name in FUNC_ARGS.keys():
            func = get_time_func(name, FUNC_ARGS[name])
            dct[func.__name__] = func
        return type.__new__(cls, cls_name, bases, dct)
class CythonSpecial(metaclass=_CythonSpecialMeta):
    """Compare pure-Python, vectorized numpy, and cython call overhead for
    a selection of scipy.special kernels."""

    def setup(self, name, args, N, api):
        self.py_func = getattr(cython_special, '_bench_{}_py'.format(name))
        self.cy_func = getattr(cython_special, '_bench_{}_cy'.format(name))
        # Strip the argument-signature suffix to recover the ufunc name.
        base_name = re.match('^(.*)_[dDl]+$', name).group(1)
        self.np_func = getattr(special, base_name)
        # N-element arrays for the vectorized numpy path.
        self.obj = tuple(arg * np.ones(N) for arg in args)
| 1,956
| 26.56338
| 76
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/lsq_problems.py
|
"""Benchmark problems for nonlinear least squares."""
import inspect
import sys
import numpy as np
from numpy.polynomial.chebyshev import Chebyshev
from scipy.integrate import odeint
class LSQBenchmarkProblem:
    """Base class for nonlinear least-squares benchmark problems.

    The objective is F(x) = ||f(x)||^2 = sum_i f_i(x)^2 for a residual
    vector function f: R^n -> R^m.  Subclasses implement ``fun`` (the
    residuals) and ``jac`` (the m-by-n Jacobian, element (i, j) being
    the partial derivative of f_i with respect to x_j), and populate
    ``INITIAL_GUESSES`` with at least one starting point.

    Parameters
    ----------
    n : int
        Number of optimized variables.
    m : int
        Number of residuals.
    fopt : float
        Optimal sum of squared residuals; must be known to a relative
        accuracy orders of magnitude better than the benchmarked
        method's ``ftol``.
    x0 : ndarray, shape (n,)
        Initial guess.
    lb, ub : None or ndarray, shape (n,), optional
        Lower / upper bounds per variable (+-np.inf for no bound on a
        variable); None means unbounded.
    """

    # Subclasses fill this with candidate starting points.
    INITIAL_GUESSES = None

    def __init__(self, n, m, fopt, x0, lb=None, ub=None):
        self.n = n
        self.m = m
        self.fopt = fopt
        self.x0 = x0
        self.lb = lb
        self.ub = ub

    def fun(self, x):
        """Residual vector f(x), shape (m,).  Must be overridden.

        Parameters
        ----------
        x : ndarray, shape (n,)
            Point of evaluation.
        """
        raise NotImplementedError

    def jac(self, x):
        """Jacobian of ``fun`` at `x`, shape (m, n).  Must be overridden.

        Parameters
        ----------
        x : ndarray, shape (n,)
            Point of evaluation.
        """
        raise NotImplementedError

    def check_answer(self, x, ftol):
        """Return True when `x` is feasible and its objective value is
        within relative tolerance ``ftol`` of the known optimum.

        Bound violations short-circuit to False.
        """
        violates_bounds = (
            (self.lb is not None and np.any(x < self.lb)) or
            (self.ub is not None and np.any(x > self.ub))
        )
        if violates_bounds:
            return False
        objective = np.sum(self.fun(x) ** 2)
        return objective < (1 + ftol) * self.fopt
class AlphaPineneDirect(LSQBenchmarkProblem):
    """Isomerization of alpha-pinene problem, direct formulation [1]_.
    Number of variables --- 5, number of residuals --- 40, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 20
    """
    INITIAL_GUESSES = [
        np.array([5.84, 2.65, 1.63, 27.77, 4.61]) * 1e-5
    ]

    def __init__(self, x0):
        super().__init__(5, 40, 2.064572e1, x0)
        # Measurement times.
        self.t = np.array([0, 1230, 3060, 4920, 7800, 10680, 15030, 22620,
                           36420], dtype=float)
        # Initial concentrations of the 5 species.
        self.y0 = np.array([100, 0, 0, 0, 0], dtype=float)
        # Observed concentrations at each time in `t` (rows) per species
        # (columns); the first row is the t=0 initial state.
        self.y = np.array([
            [100, 0, 0, 0, 0],
            [88.35, 7.3, 2.3, 0.4, 1.75],
            [76.4, 15.6, 4.5, 0.7, 2.8],
            [65.1, 23.1, 5.3, 1.1, 5.8],
            [50.4, 32.9, 6, 1.5, 9.3],
            [37.5, 42.7, 6.0, 1.9, 12],
            [25.9, 49.1, 5.9, 2.2, 17],
            [14, 57.4, 5.1, 2.6, 21],
            [4.5, 63.1, 3.8, 2.9, 25.7]
        ])

    def fun_ode_rhs(self, y, t, x):
        # Right-hand side of the kinetic ODE system; linear in y with
        # rate constants x.
        return np.array(
            [-(x[0] + x[1]) * y[0],
             x[0] * y[0],
             x[1] * y[0] - (x[2] + x[3]) * y[2] + x[4] * y[4],
             x[2] * y[2],
             x[3] * y[2] - x[4] * y[4]]
        )

    def jac_ode_rhs(self, y, t, x):
        # RHS augmented with the 5x5 partials of the RHS with respect to
        # the rate constants x, flattened row-wise.
        jac_part = np.array(
            [-y[0], -y[0], 0, 0, 0,
             y[0], 0, 0, 0, 0,
             0, y[0], -y[2], -y[2], y[4],
             0, 0, y[2], 0, 0,
             0, 0, 0, y[2], -y[4]]
        )
        return np.hstack((self.fun_ode_rhs(y, t, x), jac_part))

    def fun(self, x):
        # Integrate the model and compare to data, excluding the t=0 row.
        y_hat = odeint(self.fun_ode_rhs, self.y0, self.t, args=(x,))
        return y_hat[1:].ravel() - self.y[1:].ravel()

    def jac(self, x):
        # Integrate the augmented (state + sensitivity) system and return
        # the sensitivity part reshaped to (m, n) = (40, 5).
        result = odeint(self.jac_ode_rhs, np.hstack((self.y0, np.zeros(25))),
                        self.t, args=(x,))
        return result[1:, 5:].reshape((40, 5))
class CoatingThickness(LSQBenchmarkProblem):
    """Coating thickness standardization problem, [1]_.
    Number of variables --- 134, number of residuals --- 252, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 25
    """
    INITIAL_GUESSES = [
        # 8 bilinear model coefficients followed by 126 zero perturbations.
        np.hstack(([-8.0, 13.0, 1.2, 0.2, 0.1, 6.0, 5.5, -5.2],
                   np.zeros(126)))
    ]

    def __init__(self, x0):
        super().__init__(134, 252, 0.5054986, x0)
        # Number of measurement sites: a quarter of the residual count.
        self.n0 = self.m // 4
        # Measured independent variables, two rows of n0 readings each.
        self.xi = np.array([
            [0.7140, 0.7169, 0.7232, 0.7151, 0.6848, 0.7070, 0.7177, 0.7073,
             0.6734, 0.7174, 0.7125, 0.6947, 0.7121, 0.7166, 0.6894, 0.6897,
             0.7024, 0.7026, 0.6800, 0.6957, 0.6987, 0.7111, 0.7097, 0.6809,
             0.7139, 0.7046, 0.6950, 0.7032, 0.7019, 0.6975, 0.6955, 0.7056,
             0.6965, 0.6848, 0.6995, 0.6105, 0.6027, 0.6084, 0.6081, 0.6057,
             0.6116, 0.6052, 0.6136, 0.6032, 0.6081, 0.6092, 0.6122, 0.6157,
             0.6191, 0.6169, 0.5483, 0.5371, 0.5576, 0.5521, 0.5495, 0.5499,
             0.4937, 0.5092, 0.5433, 0.5018, 0.5363, 0.4977, 0.5296],
            [5.145, 5.241, 5.389, 5.211, 5.154, 5.105, 5.191, 5.013, 5.582,
             5.208, 5.142, 5.284, 5.262, 6.838, 6.215, 6.817, 6.889, 6.732,
             6.717, 6.468, 6.776, 6.574, 6.465, 6.090, 6.350, 4.255, 4.154,
             4.211, 4.287, 4.104, 4.007, 4.261, 4.150, 4.040, 4.155, 5.086,
             5.021, 5.040, 5.247, 5.125, 5.136, 4.949, 5.253, 5.154, 5.227,
             5.120, 5.291, 5.294, 5.304, 5.209, 5.384, 5.490, 5.563, 5.532,
             5.372, 5.423, 7.237, 6.944, 6.957, 7.138, 7.009, 7.074, 7.046]
        ])
        # Observed responses: first n0 entries for response 1, next n0 for
        # response 2.
        self.y = np.array(
            [9.3636, 9.3512, 9.4891, 9.1888, 9.3161, 9.2585, 9.2913, 9.3914,
             9.4524, 9.4995, 9.4179, 9.468, 9.4799, 11.2917, 11.5062, 11.4579,
             11.3977, 11.3688, 11.3897, 11.3104, 11.3882, 11.3629, 11.3149,
             11.2474, 11.2507, 8.1678, 8.1017, 8.3506, 8.3651, 8.2994, 8.1514,
             8.2229, 8.1027, 8.3785, 8.4118, 8.0955, 8.0613, 8.0979, 8.1364,
             8.1700, 8.1684, 8.0885, 8.1839, 8.1478, 8.1827, 8.029, 8.1000,
             8.2579, 8.2248, 8.2540, 6.8518, 6.8547, 6.8831, 6.9137, 6.8984,
             6.8888, 8.5189, 8.5308, 8.5184, 8.5222, 8.5705, 8.5353, 8.5213,
             8.3158, 8.1995, 8.2283, 8.1857, 8.2738, 8.2131, 8.2613, 8.2315,
             8.2078, 8.2996, 8.3026, 8.0995, 8.2990, 9.6753, 9.6687, 9.5704,
             9.5435, 9.6780, 9.7668, 9.7827, 9.7844, 9.7011, 9.8006, 9.7610,
             9.7813, 7.3073, 7.2572, 7.4686, 7.3659, 7.3587, 7.3132, 7.3542,
             7.2339, 7.4375, 7.4022, 10.7914, 10.6554, 10.7359, 10.7583,
             10.7735, 10.7907, 10.6465, 10.6994, 10.7756, 10.7402, 10.6800,
             10.7000, 10.8160, 10.6921, 10.8677, 12.3495, 12.4424, 12.4303,
             12.5086, 12.4513, 12.4625, 16.2290, 16.2781, 16.2082, 16.2715,
             16.2464, 16.1626, 16.1568]
        )
        # Weights penalizing the xi perturbation variables x[8:].
        self.scale1 = 4.08
        self.scale2 = 0.417

    def fun(self, x):
        # x[0:4] and x[4:8] are bilinear model coefficients for the two
        # responses; x[8:] are additive perturbations of the measured xi.
        xi = np.vstack(
            (self.xi[0] + x[8:8 + self.n0],
             self.xi[1] + x[8 + self.n0:])
        )
        z1 = x[0] + x[1] * xi[0] + x[2] * xi[1] + x[3] * xi[0] * xi[1]
        z2 = x[4] + x[5] * xi[0] + x[6] * xi[1] + x[7] * xi[0] * xi[1]
        # Residuals: model misfit for both responses plus the scaled
        # perturbations themselves.
        return np.hstack(
            (z1 - self.y[:self.n0],
             z2 - self.y[self.n0:],
             self.scale1 * x[8:8 + self.n0],
             self.scale2 * x[8 + self.n0:])
        )

    def jac(self, x):
        J = np.zeros((self.m, self.n))
        ind = np.arange(self.n0)
        xi = np.vstack(
            (self.xi[0] + x[8:8 + self.n0],
             self.xi[1] + x[8 + self.n0:])
        )
        # Rows 0..n0: first-response misfit w.r.t. coefficients and
        # perturbations.
        J[:self.n0, 0] = 1
        J[:self.n0, 1] = xi[0]
        J[:self.n0, 2] = xi[1]
        J[:self.n0, 3] = xi[0] * xi[1]
        J[ind, ind + 8] = x[1] + x[3] * xi[1]
        J[ind, ind + 8 + self.n0] = x[2] + x[3] * xi[0]
        # Rows n0..2*n0: second-response misfit.
        J[self.n0:2 * self.n0, 4] = 1
        J[self.n0:2 * self.n0, 5] = xi[0]
        J[self.n0:2 * self.n0, 6] = xi[1]
        J[self.n0:2 * self.n0, 7] = xi[0] * xi[1]
        J[ind + self.n0, ind + 8] = x[5] + x[7] * xi[1]
        J[ind + self.n0, ind + 8 + self.n0] = x[6] + x[7] * xi[0]
        # Rows 2*n0..4*n0: the scaled-perturbation residuals are linear.
        J[ind + 2 * self.n0, ind + 8] = self.scale1
        J[ind + 3 * self.n0, ind + 8 + self.n0] = self.scale2
        return J
class ExponentialFitting(LSQBenchmarkProblem):
    """The problem of fitting the sum of exponentials with linear degrees
    to data, [1]_.
    Number of variables --- 5, number of residuals --- 33, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 26
    """
    INITIAL_GUESSES = [
        np.array([0.5, 1.5, -1, 1e-2, 2e-2])
    ]

    def __init__(self, x0):
        super().__init__(5, 33, 5.464895e-5, x0)
        # Sample points t = 0, 10, 20, ..., 320.
        self.t = np.arange(self.m, dtype=float) * 10
        self.y = 1e-1 * np.array(
            [8.44, 9.08, 9.32, 9.36, 9.25, 9.08, 8.81, 8.5, 8.18,
             7.84, 7.51, 7.18, 6.85, 6.58, 6.28, 6.03, 5.8, 5.58,
             5.38, 5.22, 5.06, 4.9, 4.78, 4.67, 4.57, 4.48, 4.38,
             4.31, 4.24, 4.2, 4.14, 4.11, 4.06]
        )

    def fun(self, x):
        # Model: x0 + x1*exp(-x3*t) + x2*exp(-x4*t), compared to data.
        return (x[0] + x[1] * np.exp(-x[3] * self.t) +
                x[2] * np.exp(-x[4] * self.t) - self.y)

    def jac(self, x):
        # Analytic partials of the model w.r.t. each parameter.
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = np.exp(-x[3] * self.t)
        J[:, 2] = np.exp(-x[4] * self.t)
        J[:, 3] = -x[1] * self.t * np.exp(-x[3] * self.t)
        J[:, 4] = -x[2] * self.t * np.exp(-x[4] * self.t)
        return J
class GaussianFitting(LSQBenchmarkProblem):
    """The problem of fitting the sum of exponentials with linear and
    quadratic degrees to data, [1]_.
    Number of variables --- 11, number of residuals --- 65, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 27
    """
    INITIAL_GUESSES = [
        np.array([1.3, 6.5e-1, 6.5e-1, 7.0e-1, 6.0e-1,
                  3.0, 5.0, 7.0, 2.0, 4.5, 5.5])
    ]

    def __init__(self, x0):
        super().__init__(11, 65, 4.013772e-02, x0)
        # Sample points t = 0, 0.1, ..., 6.4.
        self.t = np.arange(self.m, dtype=float) * 1e-1
        self.y = np.array(
            [1.366, 1.191, 1.112, 1.013, 9.91e-1, 8.85e-1, 8.31e-1, 8.47e-1,
             7.86e-1, 7.25e-1, 7.46e-1, 6.79e-1, 6.08e-1, 6.55e-1, 6.16e-1,
             6.06e-1, 6.02e-1, 6.26e-1, 6.51e-1, 7.24e-1, 6.49e-1, 6.49e-1,
             6.94e-1, 6.44e-1, 6.24e-1, 6.61e-1, 6.12e-1, 5.58e-1, 5.33e-1,
             4.95e-1, 5.0e-1, 4.23e-1, 3.95e-1, 3.75e-1, 3.72e-1, 3.91e-1,
             3.96e-1, 4.05e-1, 4.28e-1, 4.29e-1, 5.23e-1, 5.62e-1, 6.07e-1,
             6.53e-1, 6.72e-1, 7.08e-1, 6.33e-1, 6.68e-1, 6.45e-1, 6.32e-1,
             5.91e-1, 5.59e-1, 5.97e-1, 6.25e-1, 7.39e-1, 7.1e-1, 7.29e-1,
             7.2e-1, 6.36e-1, 5.81e-1, 4.28e-1, 2.92e-1, 1.62e-1, 9.8e-2,
             5.4e-2]
        )

    def fun(self, x):
        # One exponential decay plus three Gaussian bumps centered at
        # x[8], x[9], x[10].
        return (x[0] * np.exp(-x[4] * self.t) +
                x[1] * np.exp(-x[5] * (self.t - x[8]) ** 2) +
                x[2] * np.exp(-x[6] * (self.t - x[9]) ** 2) +
                x[3] * np.exp(-x[7] * (self.t - x[10]) ** 2) - self.y)

    def jac(self, x):
        J = np.empty((self.m, self.n))
        # Shared exponential factors, reused across the partials.
        e0 = np.exp(-x[4] * self.t)
        e1 = np.exp(-x[5] * (self.t - x[8]) ** 2)
        e2 = np.exp(-x[6] * (self.t - x[9]) ** 2)
        e3 = np.exp(-x[7] * (self.t - x[10]) ** 2)
        J[:, 0] = e0
        J[:, 1] = e1
        J[:, 2] = e2
        J[:, 3] = e3
        J[:, 4] = -x[0] * self.t * e0
        J[:, 5] = -x[1] * (self.t - x[8]) ** 2 * e1
        J[:, 6] = -x[2] * (self.t - x[9]) ** 2 * e2
        J[:, 7] = -x[3] * (self.t - x[10]) ** 2 * e3
        J[:, 8] = 2 * x[1] * x[5] * (self.t - x[8]) * e1
        J[:, 9] = 2 * x[2] * x[6] * (self.t - x[9]) * e2
        J[:, 10] = 2 * x[3] * x[7] * (self.t - x[10]) * e3
        return J
class ThermistorResistance(LSQBenchmarkProblem):
    """The problem of fitting thermistor parameters to data, [1]_.
    Number of variables --- 3, number of residuals --- 16, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 28
    """
    INITIAL_GUESSES = [
        np.array([2e-2, 4e3, 2.5e2])
    ]

    def __init__(self, x0_ind):
        super().__init__(3, 16, 87.94585, x0_ind)
        # Temperatures 50, 95, 140, ..., 725.
        self.t = 5 + 45 * (1 + np.arange(self.m, dtype=float))
        # Measured resistances.
        self.y = np.array(
            [3.478e4, 2.861e4, 2.365e4, 1.963e4, 1.637e4, 1.372e4, 1.154e4,
             9.744e3, 8.261e3, 7.03e3, 6.005e3, 5.147e3, 4.427e3, 3.82e3,
             3.307e3, 2.872e3]
        )

    def fun(self, x):
        # Steinhart-Hart-style model: x0 * exp(x1 / (t + x2)).
        return x[0] * np.exp(x[1] / (self.t + x[2])) - self.y

    def jac(self, x):
        J = np.empty((self.m, self.n))
        e = np.exp(x[1] / (self.t + x[2]))
        J[:, 0] = e
        J[:, 1] = x[0] / (self.t + x[2]) * e
        J[:, 2] = -x[0] * x[1] * (self.t + x[2]) ** -2 * e
        return J
class EnzymeReaction(LSQBenchmarkProblem):
    """The problem of fitting kinetic parameters for an enzyme reaction, [1]_.
    Number of variables --- 4, number of residuals --- 11, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 29
    """
    INITIAL_GUESSES = [
        np.array([2.5, 3.9, 4.15, 3.9]) * 1e-1
    ]

    def __init__(self, x0_ind):
        super().__init__(4, 11, 3.075057e-04, x0_ind)
        # Substrate concentrations and observed reaction rates.
        self.u = np.array([4.0, 2.0, 1.0, 5.0e-1, 2.5e-1, 1.67e-1,
                           1.25e-1, 1.0e-1, 8.33e-2, 7.14e-2, 6.25e-2])
        self.y = np.array([1.957e-1, 1.947e-1, 1.735e-1, 1.6e-1, 8.44e-2,
                           6.27e-2, 4.56e-2, 3.42e-2, 3.23e-2, 2.35e-2,
                           2.46e-2])

    def fun(self, x):
        # Rational kinetic model x0*(u^2 + x1*u) / (u^2 + x2*u + x3).
        return (x[0] * (self.u ** 2 + x[1] * self.u) /
                (self.u ** 2 + x[2] * self.u + x[3]) - self.y)

    def jac(self, x):
        J = np.empty((self.m, self.n))
        # Shared numerator/denominator of the rational model.
        den = self.u ** 2 + x[2] * self.u + x[3]
        num = self.u ** 2 + x[1] * self.u
        J[:, 0] = num / den
        J[:, 1] = x[0] * self.u / den
        J[:, 2] = -x[0] * num * self.u / den ** 2
        J[:, 3] = -x[0] * num / den ** 2
        return J
class ChebyshevQuadrature(LSQBenchmarkProblem):
    """Optimal nodes of an equal-weight quadrature formula [1]_.

    Number of variables --- 11, number of residuals --- 11, no bounds.
    .. [1] Brett M. Averick et al. "The MINPACK-2 Test Problem Collection",
       p. 30
    """

    INITIAL_GUESSES = [
        (1 + np.arange(11, dtype=float)) / 12
    ]

    def __init__(self, x0):
        super().__init__(11, 11, 2.799761e-03, x0)
        # Chebyshev basis polynomials T_0..T_10 rescaled to [0, 1].
        seed = Chebyshev(1)
        self.T_all = [seed.basis(k, domain=[0.0, 1.0]) for k in range(11)]

    def fun(self, x):
        residuals = np.empty(self.n)
        for k in range(self.m):
            poly = self.T_all[k]
            # Equal-weight quadrature estimate (mean over the nodes x)
            # minus the exact integral of T_k over [0, 1].
            residuals[k] = np.mean(poly(x)) - poly.integ(lbnd=0.0)(1.0)
        return residuals

    def jac(self, x):
        J = np.empty((self.m, self.n))
        for k in range(self.m):
            J[k] = self.T_all[k].deriv()(x)
        # The mean over n nodes contributes the 1/n factor to every row.
        J /= self.n
        return J
def extract_lsq_problems():
    """Collect every least-squares problem defined in this module.

    Returns
    -------
    dict, str -> LSQBenchmarkProblem
        Maps a problem name (suffixed ``_<i>`` when the class provides
        several initial guesses) to a constructed instance.
    """
    problems = {}
    this_module = sys.modules[__name__]
    for name, cls in inspect.getmembers(this_module, inspect.isclass):
        is_problem = (name != "LSQBenchmarkProblem" and
                      issubclass(cls, LSQBenchmarkProblem) and
                      hasattr(cls, 'INITIAL_GUESSES'))
        if not is_problem:
            continue
        needs_suffix = len(cls.INITIAL_GUESSES) > 1
        for i, x0 in enumerate(cls.INITIAL_GUESSES):
            key_name = "{0}_{1}".format(name, i) if needs_suffix else name
            problems[key_name] = cls(x0)
    return problems
| 17,304
| 34.461066
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/stats_sampling.py
|
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
from scipy import stats
with safe_import():
from scipy.stats import sampling
with safe_import():
from scipy import special
# Beta distribution with a = 2, b = 3
class contdist1:
    """Beta(2, 3) distribution on [0, 1]: pdf(x) = 12*x*(1-x)**2."""

    def __init__(self):
        # Mode of Beta(2, 3): (a-1)/(a+b-2) = 1/3.
        self.mode = 1/3

    def pdf(self, x):
        return 12 * x * (1-x)**2

    def dpdf(self, x):
        # Product-rule derivative of the pdf.
        return 12 * ((1-x)**2 - 2*x*(1-x))

    def cdf(self, x):
        # Antiderivative of 12*(x - 2*x**2 + x**3); the previous version
        # dropped the factor 2 on the cubic term, so cdf(1) evaluated to 5
        # instead of 1.
        return 12 * (x**2/2 - 2*x**3/3 + x**4/4)

    def support(self):
        return 0, 1

    def __repr__(self):
        # asv prints this.
        return 'beta(2, 3)'
# Standard Normal Distribution
class contdist2:
    """Standard normal distribution N(0, 1)."""

    def __init__(self):
        self.mode = 0

    def pdf(self, x):
        norm_const = 1. / np.sqrt(2 * np.pi)
        return norm_const * np.exp(-0.5 * x * x)

    def dpdf(self, x):
        norm_const = 1. / np.sqrt(2 * np.pi)
        return norm_const * -x * np.exp(-0.5 * x * x)

    def cdf(self, x):
        # Exact normal CDF via scipy.special.
        return special.ndtr(x)

    def __repr__(self):
        # asv prints this.
        return 'norm(0, 1)'
# pdf with piecewise linear function as transformed density with T = -1/sqrt
# Taken from UNU.RAN test suite (from file t_tdr_ps.c)
class contdist3:
    """Density f(x) = 1/(|x - shift| + 1)**2, a piecewise linear function
    under the transformation T = -1/sqrt.  From the UNU.RAN test suite
    (file t_tdr_ps.c)."""

    def __init__(self, shift=0.):
        self.shift = shift
        self.mode = shift

    def pdf(self, x):
        centered = x - self.shift
        inv = 1. / (abs(centered) + 1.)
        return inv * inv

    def dpdf(self, x):
        centered = x - self.shift
        inv = 1. / (abs(centered) + 1.)
        slope = 2. * inv * inv * inv
        # Sign flips at the mode: rising to the left, falling to the right.
        return slope if (centered < 0.) else -slope

    def cdf(self, x):
        centered = x - self.shift
        if centered <= 0.:
            return 0.5 / (1. - centered)
        return 1. - 0.5 / (1. + centered)

    def __repr__(self):
        # asv prints this.
        return f'sqrtlinshft({self.shift})'
# Sin 2 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 1
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class contdist4:
    """'sin2' density: f(x) = 0.05 + 0.45*(1 + sin(2*pi*x)) for |x| <= 1,
    0 otherwise.  From the UNU.RAN test suite (file t_pinv.c)."""

    def __init__(self):
        self.mode = 0

    def pdf(self, x):
        return 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))

    def dpdf(self, x):
        # d/dx pdf = 0.45 * 2*pi * cos(2*pi*x).  The previous version
        # carried a spurious 0.2 factor copied from the rescaled 'sin10'
        # variant, making the derivative inconsistent with `pdf`.
        return 0.45 * (2*np.pi) * np.cos(2*np.pi*x)

    def cdf(self, x):
        return (0.05*(x + 1) +
                0.9*(1. + 2.*np.pi*(1 + x) - np.cos(2.*np.pi*x)) /
                (4.*np.pi))

    def support(self):
        return -1, 1

    def __repr__(self):
        # asv prints this.
        return 'sin2'
# Sin 10 distribution
# / 0.05 + 0.45*(1 +sin(2 Pi x)) if |x| <= 5
# f(x) = <
# \ 0 otherwise
# Taken from UNU.RAN test suite (from file t_pinv.c)
class contdist5:
    """'sin10' density: f(x) = 0.2*(0.05 + 0.45*(1 + sin(2*pi*x))) for
    |x| <= 5, 0 otherwise.  From the UNU.RAN test suite (file t_pinv.c)."""

    def __init__(self):
        self.mode = 0

    def pdf(self, x):
        base = 0.05 + 0.45 * (1 + np.sin(2*np.pi*x))
        return 0.2 * base

    def dpdf(self, x):
        amplitude = 0.2 * 0.45 * (2*np.pi)
        return amplitude * np.cos(2*np.pi*x)

    def cdf(self, x):
        linear = x/10. + 0.5
        oscillation = 0.09/(2*np.pi) * (np.cos(10*np.pi) -
                                        np.cos(2*np.pi*x))
        return linear + oscillation

    def support(self):
        return -5, 5

    def __repr__(self):
        # asv prints this.
        return 'sin10'
# Instances benchmarked below; contdist3 appears twice to also exercise a
# distribution far from the origin.
allcontdists = [contdist1(), contdist2(), contdist3(), contdist3(10000.),
                contdist4(), contdist5()]
class TransformedDensityRejection(Benchmark):
    # Benchmark TDR setup and sampling for each density and two
    # transformation parameters c.
    param_names = ['dist', 'c']
    params = [allcontdists, [0., -0.5]]

    def setup(self, dist, c):
        self.urng = np.random.default_rng(0xfaad7df1c89e050200dbe258636b3265)
        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                self.rng = sampling.TransformedDensityRejection(
                    dist, c=c, random_state=self.urng
                )
            except sampling.UNURANError:
                # contdist3 is not T-concave for c=0. So, skip such test-cases
                raise NotImplementedError(f"{dist} not T-concave for c={c}")

    def time_tdr_setup(self, dist, c):
        # Measure construction cost only.
        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            sampling.TransformedDensityRejection(
                dist, c=c, random_state=self.urng
            )

    def time_tdr_rvs(self, dist, c):
        # Measure drawing 100k variates from the generator built in setup.
        self.rng.rvs(100000)
class SimpleRatioUniforms(Benchmark):
    # The cdf_at_mode flag toggles whether the exact CDF at the mode is
    # supplied to the generator — presumably allowing a tighter envelope;
    # confirm against the scipy.stats.sampling docs.
    param_names = ['dist', 'cdf_at_mode']
    params = [allcontdists, [0, 1]]

    def setup(self, dist, cdf_at_mode):
        self.urng = np.random.default_rng(0xfaad7df1c89e050200dbe258636b3265)
        try:
            if cdf_at_mode:
                cdf_at_mode = dist.cdf(dist.mode)
            else:
                cdf_at_mode = None
            self.rng = sampling.SimpleRatioUniforms(
                dist, mode=dist.mode,
                cdf_at_mode=cdf_at_mode,
                random_state=self.urng
            )
        except sampling.UNURANError:
            # Skip distributions the method cannot handle.
            raise NotImplementedError(f"{dist} not T-concave")

    def time_srou_setup(self, dist, cdf_at_mode):
        # Measure construction cost (mirrors setup).
        if cdf_at_mode:
            cdf_at_mode = dist.cdf(dist.mode)
        else:
            cdf_at_mode = None
        sampling.SimpleRatioUniforms(
            dist, mode=dist.mode,
            cdf_at_mode=cdf_at_mode,
            random_state=self.urng
        )

    def time_srou_rvs(self, dist, cdf_at_mode):
        # Measure drawing 100k variates from the generator built in setup.
        self.rng.rvs(100000)
class NumericalInversePolynomial(Benchmark):
    # Polynomial-interpolation inversion (PINV) benchmarks.
    param_names = ['dist']
    params = [allcontdists]

    def setup(self, dist):
        self.urng = np.random.default_rng(0xb235b58c1f616c59c18d8568f77d44d1)
        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                self.rng = sampling.NumericalInversePolynomial(
                    dist, random_state=self.urng
                )
            except sampling.UNURANError:
                # Skip distributions the method cannot set up.
                raise NotImplementedError(f"setup failed for {dist}")

    def time_pinv_setup(self, dist):
        # Measure construction cost only.
        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            sampling.NumericalInversePolynomial(
                dist, random_state=self.urng
            )

    def time_pinv_rvs(self, dist):
        # Measure drawing 100k variates from the generator built in setup.
        self.rng.rvs(100000)
class NumericalInverseHermite(Benchmark):
    # Hermite-interpolation inversion (HINV) at two interpolation orders.
    param_names = ['dist', 'order']
    params = [allcontdists, [3, 5]]

    def setup(self, dist, order):
        self.urng = np.random.default_rng(0xb235b58c1f616c59c18d8568f77d44d1)
        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                self.rng = sampling.NumericalInverseHermite(
                    dist, order=order, random_state=self.urng
                )
            except sampling.UNURANError:
                # Skip distributions the method cannot set up.
                raise NotImplementedError(f"setup failed for {dist}")

    def time_hinv_setup(self, dist, order):
        # Measure construction cost only.
        with np.testing.suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            sampling.NumericalInverseHermite(
                dist, order=order, random_state=self.urng
            )

    def time_hinv_rvs(self, dist, order):
        # Measure drawing 100k variates from the generator built in setup.
        self.rng.rvs(100000)
class DiscreteAliasUrn(Benchmark):
    """Alias-urn sampling from a few finite-domain discrete distributions."""

    param_names = ['distribution']
    params = [
        # a subset of discrete distributions with finite domain.
        [['nhypergeom', (20, 7, 1)],
         ['hypergeom', (30, 12, 6)],
         ['nchypergeom_wallenius', (140, 80, 60, 0.5)],
         ['binom', (5, 0.4)]]
    ]

    def setup(self, distribution):
        name, shape_params = distribution
        dist = getattr(stats, name)
        low, high = dist.support(*shape_params)
        self.urng = np.random.default_rng(0x2fc9eb71cd5120352fa31b7a048aa867)
        # Probability vector over the (finite, inclusive) support.
        support_points = np.arange(low, high + 1)
        self.pv = dist.pmf(support_points, *shape_params)
        self.rng = sampling.DiscreteAliasUrn(self.pv, random_state=self.urng)

    def time_dau_setup(self, distribution):
        sampling.DiscreteAliasUrn(self.pv, random_state=self.urng)

    def time_dau_rvs(self, distribution):
        self.rng.rvs(100000)
class DiscreteGuideTable(Benchmark):
    """Guide-table sampling from a few finite-domain discrete distributions."""

    param_names = ['distribution']
    params = [
        # a subset of discrete distributions with finite domain.
        [['nhypergeom', (20, 7, 1)],
         ['hypergeom', (30, 12, 6)],
         ['nchypergeom_wallenius', (140, 80, 60, 0.5)],
         ['binom', (5, 0.4)]]
    ]

    def setup(self, distribution):
        name, shape_params = distribution
        dist = getattr(stats, name)
        low, high = dist.support(*shape_params)
        self.urng = np.random.default_rng(0x2fc9eb71cd5120352fa31b7a048aa867)
        # Probability vector over the (finite, inclusive) support.
        support_points = np.arange(low, high + 1)
        self.pv = dist.pmf(support_points, *shape_params)
        self.rng = sampling.DiscreteGuideTable(self.pv, random_state=self.urng)

    def time_dgt_setup(self, distribution):
        sampling.DiscreteGuideTable(self.pv, random_state=self.urng)

    def time_dgt_rvs(self, distribution):
        self.rng.rvs(100000)
| 8,743
| 27.115756
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/fft_basic.py
|
""" Test functions for fftpack.basic module
"""
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
from numpy.random import rand
import numpy as np
from concurrent import futures
import os
import scipy.fftpack
import numpy.fft
from .common import Benchmark, safe_import
# Optional dependencies: `safe_import` swallows ImportError and records it
# on `exc`, so the benchmarks degrade gracefully when scipy.fft or pyfftw
# is not installed.
with safe_import() as exc:
    import scipy.fft as scipy_fft
    has_scipy_fft = True
if exc.error:
    has_scipy_fft = False
with safe_import() as exc:
    import pyfftw.interfaces.numpy_fft as pyfftw_fft
    import pyfftw
    # Cache pyfftw plans between calls so repeated transforms are fast.
    pyfftw.interfaces.cache.enable()
    has_pyfftw = True
if exc.error:
    # Empty-dict placeholder: getattr(pyfftw_fft, name, None) returns None,
    # which PyfftwBackend maps to NotImplemented.
    pyfftw_fft = {} # noqa: F811
    has_pyfftw = False
class PyfftwBackend:
    """Backend for pyfftw"""
    __ua_domain__ = 'numpy.scipy.fft'

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # pyfftw's numpy-like interface has no overwrite_x argument.
        kwargs.pop('overwrite_x', None)
        fn = getattr(pyfftw_fft, method.__name__, None)
        if fn is None:
            return NotImplemented
        return fn(*args, **kwargs)
def random(size):
    """Uniform samples on [0, 1) with shape ``size`` (a sequence of ints)."""
    return rand(*tuple(size))
def direct_dft(x):
    """Reference DFT of ``x`` via direct O(n^2) summation."""
    x = asarray(x)
    n = len(x)
    # Angular factors: w[j] = -2*pi*1j*j/n; row k uses exp(k*w).
    w = -arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * w), x)
    return out
def direct_idft(x):
    """Reference inverse DFT of ``x`` via direct O(n^2) summation."""
    x = asarray(x)
    n = len(x)
    # Inverse transform: positive exponent and 1/n normalization.
    w = arange(n) * (2j * pi / n)
    out = zeros(n, dtype=cdouble)
    for k in range(n):
        out[k] = dot(exp(k * w), x) / n
    return out
def get_module(mod_name):
    """Resolve a module-name string to the actual FFT module object.

    Raises NotImplementedError (asv skip) when scipy.fft was requested
    but could not be imported.
    """
    if mod_name == 'scipy.fft' and not has_scipy_fft:
        raise NotImplementedError
    return {
        'scipy.fftpack': scipy.fftpack,
        'scipy.fft': scipy_fft,
        'numpy.fft': numpy.fft,
    }[mod_name]
class Fft(Benchmark):
    """Time 1-D forward/inverse FFTs across sizes, dtypes and modules."""

    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        if cmplx == 'cmplx':
            self.x = (random([size]).astype(cdouble)
                      + random([size]).astype(cdouble)*1j)
        else:
            self.x = random([size]).astype(double)
        mod = get_module(module)
        self.fft = mod.fft
        self.ifft = mod.ifft

    def time_fft(self, size, cmplx, module):
        self.fft(self.x)

    def time_ifft(self, size, cmplx, module):
        self.ifft(self.x)
class NextFastLen(Benchmark):
    """Time next_fast_len on smooth (powers of two) vs prime sizes."""
    params = [
        [12, 13, # small ones
         1021, 1024, # 2 ** 10 and a prime
         16381, 16384, # 2 ** 14 and a prime
         262139, 262144, # 2 ** 17 and a prime
         999983, 1048576, # 2 ** 20 and a prime
         ],
    ]
    param_names = ['size']
    def setup(self, size):
        if not has_scipy_fft:
            raise NotImplementedError
    def time_next_fast_len(self, size):
        # `__wrapped__` bypasses the caching wrapper, timing the raw search.
        scipy_fft.next_fast_len.__wrapped__(size)
    def time_next_fast_len_cached(self, size):
        # Goes through the wrapper, so repeated calls hit the cache.
        scipy_fft.next_fast_len(size)
class RFft(Benchmark):
    """Time real-input forward/inverse FFTs across implementations."""

    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'module']

    def setup(self, size, module):
        self.x = random([size]).astype(double)
        mod = get_module(module)
        self.rfft = mod.rfft
        self.irfft = mod.irfft
        # Precompute a spectrum so irfft timing excludes the forward pass.
        self.y = self.rfft(self.x)

    def time_rfft(self, size, module):
        self.rfft(self.x)

    def time_irfft(self, size, module):
        self.irfft(self.y)
class RealTransforms1D(Benchmark):
    """Time 1-D DCT/DST of types I-IV for scipy.fftpack and scipy.fft."""
    params = [
        [75, 100, 135, 256, 313, 512, 675, 1024, 2025, 2048],
        ['I', 'II', 'III', 'IV'],
        ['scipy.fftpack', 'scipy.fft']
    ]
    param_names = ['size', 'type', 'module']
    def setup(self, size, type, module):
        module = get_module(module)
        self.dct = getattr(module, 'dct')
        self.dst = getattr(module, 'dst')
        # Map roman-numeral parameter to the integer `type` argument.
        self.type = {'I':1, 'II':2, 'III':3, 'IV':4}[type]
        # The "logical" transform size should be smooth, which for dct/dst
        # type 1 is offset by -1/+1 respectively
        if self.type == 1:
            size += 1
        self.x = random([size]).astype(double)
        if self.type == 1:
            # DST-I logical size is N+1, so drop two samples for the DST.
            self.x_dst = self.x[:-2].copy()
    def time_dct(self, size, type, module):
        self.dct(self.x, self.type)
    def time_dst(self, size, type, module):
        x = self.x if self.type != 1 else self.x_dst
        self.dst(x, self.type)
class Fftn(Benchmark):
    """Time n-dimensional forward FFTs across implementations."""

    params = [
        ["100x100", "313x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['scipy.fftpack', 'scipy.fft', 'numpy.fft']
    ]
    param_names = ['size', 'type', 'module']

    def setup(self, size, cmplx, module):
        # "AxB" strings parametrize the 2-D shape.
        shape = [int(dim) for dim in size.split("x")]
        if cmplx != 'cmplx':
            self.x = random(shape).astype(double)
        else:
            self.x = (random(shape).astype(cdouble)
                      + random(shape).astype(cdouble)*1j)
        self.fftn = get_module(module).fftn

    def time_fftn(self, size, cmplx, module):
        self.fftn(self.x)
class RealTransformsND(Benchmark):
    """Time n-D DCT/DST of types I-IV for scipy.fftpack and scipy.fft."""
    params = [
        ['75x75', '100x100', '135x135', '313x363', '1000x100', '256x256'],
        ['I', 'II', 'III', 'IV'],
        ['scipy.fftpack', 'scipy.fft']
    ]
    param_names = ['size', 'type', 'module']
    def setup(self, size, type, module):
        self.dctn = getattr(get_module(module), 'dctn')
        self.dstn = getattr(get_module(module), 'dstn')
        self.type = {'I':1, 'II':2, 'III':3, 'IV':4}[type]
        # The "logical" transform size should be smooth, which for dct/dst
        # type 1 is offset by -1/+1 respectively
        size = list(map(int, size.split('x')))
        if self.type == 1:
            # Generator is fine here: random() unpacks it into rand(*size).
            size = (s + 1 for s in size)
        self.x = random(size).astype(double)
        if self.type == 1:
            # DST-I logical size is N+1 per axis; trim two samples per axis.
            self.x_dst = self.x[:-2,:-2].copy()
    def time_dctn(self, size, type, module):
        self.dctn(self.x, self.type)
    def time_dstn(self, size, type, module):
        x = self.x if self.type != 1 else self.x_dst
        self.dstn(x, self.type)
class FftBackends(Benchmark):
    """Time 1-D FFTs through the scipy.fft backend-dispatch mechanism.

    'direct' bypasses dispatch entirely by calling the pocketfft
    implementation functions directly.
    """
    params = [
        [100, 256, 313, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['real', 'cmplx'],
        ['pocketfft', 'pyfftw', 'numpy', 'direct']
    ]
    param_names = ['size', 'type', 'backend']
    def setup(self, size, cmplx, backend):
        import scipy.fft
        if cmplx == 'cmplx':
            self.x = random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
        else:
            self.x = random([size]).astype(double)
        self.fft = scipy.fft.fft
        self.ifft = scipy.fft.ifft
        if backend == 'pocketfft':
            scipy.fft.set_global_backend('scipy')
        elif backend == 'pyfftw':
            if not has_pyfftw:
                raise NotImplementedError
            scipy.fft.set_global_backend(PyfftwBackend)
        elif backend == 'numpy':
            from scipy.fft._debug_backends import NumPyBackend
            scipy.fft.set_global_backend(NumPyBackend)
        elif backend == 'direct':
            import scipy.fft._pocketfft
            self.fft = scipy.fft._pocketfft.fft
            self.ifft = scipy.fft._pocketfft.ifft
    # NOTE(review): third parameter is named `module` but receives the
    # `backend` value declared in param_names (asv passes positionally).
    def time_fft(self, size, cmplx, module):
        self.fft(self.x)
    def time_ifft(self, size, cmplx, module):
        self.ifft(self.x)
class FftnBackends(Benchmark):
    """Time n-D FFTs through the scipy.fft backend-dispatch mechanism."""
    params = [
        ["100x100", "313x100", "1000x100", "256x256", "512x512"],
        ['real', 'cmplx'],
        ['pocketfft', 'pyfftw', 'numpy', 'direct']
    ]
    param_names = ['size', 'type', 'backend']
    def setup(self, size, cmplx, backend):
        import scipy.fft
        size = list(map(int, size.split("x")))
        if cmplx == 'cmplx':
            # real + 1j*real still yields a complex128 array.
            self.x = random(size).astype(double)+random(size).astype(double)*1j
        else:
            self.x = random(size).astype(double)
        self.fftn = scipy.fft.fftn
        self.ifftn = scipy.fft.ifftn
        if backend == 'pocketfft':
            scipy.fft.set_global_backend('scipy')
        elif backend == 'pyfftw':
            if not has_pyfftw:
                raise NotImplementedError
            scipy.fft.set_global_backend(PyfftwBackend)
        elif backend == 'numpy':
            from scipy.fft._debug_backends import NumPyBackend
            scipy.fft.set_global_backend(NumPyBackend)
        elif backend == 'direct':
            # Bypass dispatch: call the pocketfft implementation directly.
            import scipy.fft._pocketfft
            self.fftn = scipy.fft._pocketfft.fftn
            self.ifftn = scipy.fft._pocketfft.ifftn
    # NOTE(review): third parameter is named `module` but receives the
    # `backend` value declared in param_names (asv passes positionally).
    def time_fft(self, size, cmplx, module):
        self.fftn(self.x)
    def time_ifft(self, size, cmplx, module):
        self.ifftn(self.x)
class FftThreading(Benchmark):
    """Compare scipy.fft's internal `workers=` parallelism with an
    external ThreadPoolExecutor submitting one transform per task."""
    params = [
        ['100x100', '1000x100', '256x256', '512x512'],
        [1, 8, 32, 100],
        ['workers', 'threading']
    ]
    param_names = ['size', 'num_transforms', 'method']
    def setup(self, size, num_transforms, method):
        if not has_scipy_fft:
            raise NotImplementedError
        size = list(map(int, size.split("x")))
        self.xs = [(random(size)+1j*random(size)).astype(np.complex128)
                   for _ in range(num_transforms)]
        if method == 'threading':
            # Pool lives for the benchmark's lifetime; never shut down
            # explicitly (acceptable for a benchmark process).
            self.pool = futures.ThreadPoolExecutor(os.cpu_count())
    def map_thread(self, func):
        # Submit one transform per task and block until all complete.
        f = []
        for x in self.xs:
            f.append(self.pool.submit(func, x))
        futures.wait(f)
    def time_fft(self, size, num_transforms, method):
        if method == 'threading':
            self.map_thread(scipy_fft.fft)
        else:
            for x in self.xs:
                scipy_fft.fft(x, workers=-1)
    def time_fftn(self, size, num_transforms, method):
        if method == 'threading':
            self.map_thread(scipy_fft.fftn)
        else:
            for x in self.xs:
                scipy_fft.fftn(x, workers=-1)
| 10,009
| 27.197183
| 85
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/special.py
|
import numpy as np
from .common import Benchmark, with_attributes, safe_import
with safe_import():
from scipy.special import ai_zeros, bi_zeros, erf, expn
with safe_import():
# wasn't always in scipy.special, so import separately
from scipy.special import comb
with safe_import():
from scipy.special import loggamma
class Airy(Benchmark):
    """Benchmark computing zeros of the Airy functions Ai and Bi."""

    _n_zeros = 100000  # number of zeros computed per call

    def time_ai_zeros(self):
        ai_zeros(self._n_zeros)

    def time_bi_zeros(self):
        bi_zeros(self._n_zeros)
class Erf(Benchmark):
    """Benchmark erf on large arrays, offset to probe different regimes."""
    def setup(self, *args):
        self.rand = np.random.rand(100000)
    def time_real(self, offset):
        erf(self.rand + offset)
    # asv per-method parametrization: attach params/param_names directly
    # to the timing function instead of the class.
    time_real.params = [0.0, 2.0]
    time_real.param_names = ['offset']
class Comb(Benchmark):
    """Benchmark scipy.special.comb in exact and floating-point modes."""

    def setup(self, *args):
        grid = np.arange(1, 1000, 50)
        self.N = grid
        self.k = grid.copy()

    # Per-method parametrization for the exact (integer) path.
    @with_attributes(params=[(10, 100, 1000, 10000), (1, 10, 100)],
                     param_names=['N', 'k'])
    def time_comb_exact(self, N, k):
        comb(N, k, exact=True)

    def time_comb_float(self):
        # Broadcast N against k to evaluate the whole grid at once.
        comb(self.N[:, None], self.k[None, :])
class Loggamma(Benchmark):
    """Benchmark loggamma on large complex arguments (asymptotic regime)."""

    def setup(self):
        pts = np.logspace(3, 5, 10)
        re, im = np.meshgrid(pts, pts)
        self.large_z = re + 1j * im

    def time_loggamma_asymptotic(self):
        loggamma(self.large_z)
class Expn(Benchmark):
    """Benchmark the generalized exponential integral for large order n."""

    def setup(self):
        orders = np.arange(50, 500)
        points = np.logspace(0, 20, 100)
        # Full (order, point) grid evaluated in a single vectorized call.
        self.n, self.x = np.meshgrid(orders, points)

    def time_expn_large_n(self):
        expn(self.n, self.x)
| 1,576
| 22.191176
| 67
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/cluster.py
|
import numpy as np
from numpy.testing import suppress_warnings
from .common import Benchmark, safe_import
with safe_import():
from scipy.cluster.hierarchy import linkage
from scipy.cluster.vq import kmeans, kmeans2, vq
class HierarchyLinkage(Benchmark):
    """Benchmark hierarchical-clustering linkage for each method."""

    params = ['single', 'complete', 'average', 'weighted', 'centroid',
              'median', 'ward']
    param_names = ['method']

    def __init__(self):
        # Fixed seed so every method clusters the same 2000 2-D points.
        self.X = np.random.RandomState(0).randn(2000, 2)

    def time_linkage(self, method):
        linkage(self.X, method=method)
class KMeans(Benchmark):
    """Benchmark scipy.cluster.vq.kmeans for several cluster counts."""

    params = [2, 10, 50]
    param_names = ['k']

    def __init__(self):
        # Fixed seed so every k clusters identical data.
        self.obs = np.random.RandomState(0).rand(1000, 5)

    def time_kmeans(self, k):
        kmeans(self.obs, k, iter=10)
class KMeans2(Benchmark):
    """Benchmark kmeans2 across cluster counts and initialization methods."""
    params = [[2, 10, 50], ['random', 'points', '++']]
    param_names = ['k', 'init']
    def __init__(self):
        # Fixed seed so every parameter combination clusters the same data.
        rnd = np.random.RandomState(0)
        self.obs = rnd.rand(1000, 5)
    def time_kmeans2(self, k, init):
        # Empty clusters can occur for some (k, init) combinations; the
        # resulting UserWarning is expected and silenced while timing.
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "One of the clusters is empty. Re-run kmeans with a "
                       "different initialization")
            kmeans2(self.obs, k, minit=init, iter=10)
class VQ(Benchmark):
    """Benchmark vector quantization for several codebook sizes/dtypes."""

    params = [[2, 10, 50], ['float32', 'float64']]
    param_names = ['k', 'dtype']

    def __init__(self):
        # Draw data then codebook from the same seeded stream, in this
        # order, so results are reproducible across runs.
        rng = np.random.RandomState(0)
        self.data = rng.rand(5000, 5)
        self.cbook_source = rng.rand(50, 5)

    def setup(self, k, dtype):
        # Cast per-parameter so each dtype is timed on identical values.
        self.obs = self.data.astype(dtype)
        self.cbook = self.cbook_source[:k].astype(dtype)

    def time_vq(self, k, dtype):
        vq(self.obs, self.cbook)
| 1,784
| 25.641791
| 76
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_csgraph.py
|
"""benchmarks for the scipy.sparse.csgraph module"""
import numpy as np
import scipy.sparse
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.csgraph import laplacian
class Laplacian(Benchmark):
    """Benchmark csgraph.laplacian across sizes, formats and normalization."""

    params = [
        [30, 300, 900],
        ['dense', 'coo', 'csc', 'csr', 'dia'],
        [True, False]
    ]
    param_names = ['n', 'format', 'normed']

    def setup(self, n, format, normed):
        # Banded random matrix with 18 diagonals (9 below, 9 above main).
        band = scipy.sparse.rand(9, n, density=0.5, random_state=42).toarray()
        band = np.vstack((band, band))
        offsets = [*range(-9, 0), *range(1, 10)]
        graph = scipy.sparse.spdiags(band, offsets, n, n)
        self.A = graph.toarray() if format == 'dense' else graph.asformat(format)

    def time_laplacian(self, n, format, normed):
        laplacian(self.A, normed=normed)
| 867
| 27
| 78
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/linalg.py
|
import math
import numpy.linalg as nl
import numpy as np
from numpy.testing import assert_
from numpy.random import rand
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg as sl
def random(size):
    """Uniform samples on [0, 1) with shape ``size`` (a sequence of ints)."""
    return rand(*tuple(size))
class Bench(Benchmark):
    """Compare dense linear-algebra routines in numpy vs scipy.

    Parametrized over matrix size, memory contiguity and implementation
    module; large numpy cases are skipped in ``setup``.
    """
    params = [
        [20, 100, 500, 1000],
        ['contig', 'nocont'],
        ['numpy', 'scipy']
    ]
    param_names = ['size', 'contiguous', 'module']
    def __init__(self):
        # likely not useful to benchmark svd for large sizes
        # (overrides the size axis of time_svd only, via the underlying
        # function object)
        self.time_svd.__func__.params = [[20, 100, 500]] + self.params[1:]
    def setup(self, size, contig, module):
        if module == 'numpy' and size >= 200:
            # skip: slow, and not useful to benchmark numpy
            raise NotImplementedError()
        a = random([size, size])
        # larger diagonal ensures non-singularity:
        for i in range(size):
            a[i, i] = 10*(.1+a[i, i])
        b = random([size])
        if contig != 'contig':
            a = a[-1::-1, -1::-1] # turn into a non-contiguous array
            assert_(not a.flags['CONTIGUOUS'])
        self.a = a
        self.b = b
    def time_solve(self, size, contig, module):
        if module == 'numpy':
            nl.solve(self.a, self.b)
        else:
            sl.solve(self.a, self.b)
    def time_solve_triangular(self, size, contig, module):
        # treats self.a as a lower-triangular matrix by ignoring the strictly
        # upper-triangular part
        if module == 'numpy':
            # numpy has no solve_triangular; this branch intentionally
            # times nothing.
            pass
        else:
            sl.solve_triangular(self.a, self.b, lower=True)
    def time_inv(self, size, contig, module):
        if module == 'numpy':
            nl.inv(self.a)
        else:
            sl.inv(self.a)
    def time_det(self, size, contig, module):
        if module == 'numpy':
            nl.det(self.a)
        else:
            sl.det(self.a)
    def time_eigvals(self, size, contig, module):
        if module == 'numpy':
            nl.eigvals(self.a)
        else:
            sl.eigvals(self.a)
    def time_svd(self, size, contig, module):
        if module == 'numpy':
            nl.svd(self.a)
        else:
            sl.svd(self.a)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_det.version = "87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325"
    time_eigvals.version = "9d68d3a6b473df9bdda3d3fd25c7f9aeea7d5cee869eec730fb2a2bcd1dfb907"
    time_inv.version = "20beee193c84a5713da9749246a7c40ef21590186c35ed00a4fe854cce9e153b"
    time_solve.version = "1fe788070f1c9132cbe78a47fdb4cce58266427fc636d2aa9450e3c7d92c644c"
    time_svd.version = "0ccbda456d096e459d4a6eefc6c674a815179e215f83931a81cfa8c18e39d6e3"
class Norm(Benchmark):
    """Compare matrix-norm computation in numpy vs scipy."""
    params = [
        [(20, 20), (100, 100), (1000, 1000), (20, 1000), (1000, 20)],
        ['contig', 'nocont'],
        ['numpy', 'scipy']
    ]
    param_names = ['shape', 'contiguous', 'module']
    def setup(self, shape, contig, module):
        a = np.random.randn(*shape)
        if contig != 'contig':
            a = a[-1::-1,-1::-1] # turn into a non-contiguous array
            assert_(not a.flags['CONTIGUOUS'])
        self.a = a
    # NOTE(review): the first parameter below is named `size` but receives
    # the `shape` tuple (asv passes parameters positionally).
    def time_1_norm(self, size, contig, module):
        if module == 'numpy':
            nl.norm(self.a, ord=1)
        else:
            sl.norm(self.a, ord=1)
    def time_inf_norm(self, size, contig, module):
        if module == 'numpy':
            nl.norm(self.a, ord=np.inf)
        else:
            sl.norm(self.a, ord=np.inf)
    def time_frobenius_norm(self, size, contig, module):
        # Default ord is the Frobenius norm for matrices.
        if module == 'numpy':
            nl.norm(self.a)
        else:
            sl.norm(self.a)
class Lstsq(Benchmark):
    """
    Test the speed of four least-squares solvers on not full rank matrices.
    Also check the difference in the solutions.

    The matrix has the size ``(m, 2/3*m)``; the rank is ``1/2 * m``.
    Matrix values are random in the range (-5, 5), the same is for the right
    hand side. The complex matrix is the sum of real and imaginary matrices.
    """
    param_names = ['dtype', 'size', 'driver']
    params = [
        [np.float64, np.complex128],
        [10, 100, 1000],
        ['gelss', 'gelsy', 'gelsd', 'numpy'],
    ]
    # NOTE(review): parameter is named `lapack_driver` while param_names
    # declares 'driver'; asv passes positionally so this is cosmetic.
    def setup(self, dtype, size, lapack_driver):
        if lapack_driver == 'numpy' and size >= 200:
            # skip: slow, and not useful to benchmark numpy
            raise NotImplementedError()
        rng = np.random.default_rng(1234)
        # m x k times k x n with k < min(m, n) gives rank-deficient A.
        n = math.ceil(2./3. * size)
        k = math.ceil(1./2. * size)
        m = size
        if dtype is np.complex128:
            A = ((10 * rng.random((m,k)) - 5) +
                 1j*(10 * rng.random((m,k)) - 5))
            temp = ((10 * rng.random((k,n)) - 5) +
                    1j*(10 * rng.random((k,n)) - 5))
            b = ((10 * rng.random((m,1)) - 5) +
                 1j*(10 * rng.random((m,1)) - 5))
        else:
            A = (10 * rng.random((m,k)) - 5)
            temp = 10 * rng.random((k,n)) - 5
            b = 10 * rng.random((m,1)) - 5
        self.A = A.dot(temp)
        self.b = b
    def time_lstsq(self, dtype, size, lapack_driver):
        if lapack_driver == 'numpy':
            np.linalg.lstsq(self.A, self.b,
                            rcond=np.finfo(self.A.dtype).eps * 100)
        else:
            sl.lstsq(self.A, self.b, cond=None, overwrite_a=False,
                     overwrite_b=False, check_finite=False,
                     lapack_driver=lapack_driver)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_lstsq.version = "15ee0be14a0a597c7d1c9a3dab2c39e15c8ac623484410ffefa406bf6b596ebe"
class SpecialMatrices(Benchmark):
    """Time construction of scipy.linalg special matrices at two sizes."""
    param_names = ['size']
    params = [[4, 128]]
    def setup(self, size):
        self.x = np.arange(1, size + 1).astype(float)
        # Many tiny blocks vs two large blocks for block_diag.
        self.small_blocks = [np.ones([2, 2])] * (size//2)
        self.big_blocks = [np.ones([size//2, size//2]),
                           np.ones([size//2, size//2])]
    def time_block_diag_small(self, size):
        sl.block_diag(*self.small_blocks)
    def time_block_diag_big(self, size):
        sl.block_diag(*self.big_blocks)
    def time_circulant(self, size):
        sl.circulant(self.x)
    def time_companion(self, size):
        sl.companion(self.x)
    def time_dft(self, size):
        sl.dft(size)
    def time_hadamard(self, size):
        sl.hadamard(size)
    def time_hankel(self, size):
        sl.hankel(self.x)
    def time_helmert(self, size):
        sl.helmert(size)
    def time_hilbert(self, size):
        sl.hilbert(size)
    def time_invhilbert(self, size):
        sl.invhilbert(size)
    def time_leslie(self, size):
        # Fecundity vector plus survival vector one element shorter.
        sl.leslie(self.x, self.x[1:])
    def time_pascal(self, size):
        sl.pascal(size)
    def time_invpascal(self, size):
        sl.invpascal(size)
    def time_toeplitz(self, size):
        sl.toeplitz(self.x)
class GetFuncs(Benchmark):
    """Time BLAS-function lookup overhead and a tiny Cholesky call."""
    def setup(self):
        self.x = np.eye(1)
    def time_get_blas_funcs(self):
        # Lookup by name with an explicit dtype.
        sl.blas.get_blas_funcs('gemm', dtype=float)
    def time_get_blas_funcs_2(self):
        # Lookup of several functions, dtype inferred from the arrays.
        sl.blas.get_blas_funcs(('gemm', 'axpy'), (self.x, self.x))
    def time_small_cholesky(self):
        # Dominated by call overhead, not computation, for a 1x1 input.
        sl.cholesky(self.x)
| 7,306
| 28.345382
| 93
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/ndimage_interpolation.py
|
import numpy as np
from .common import Benchmark
try:
from scipy.ndimage import (geometric_transform, affine_transform, rotate,
zoom, shift, map_coordinates)
except ImportError:
pass
def shift_func_2d(c):
    """Mapping for geometric_transform: shift 2-D coordinates by -0.5."""
    row, col = c[0], c[1]
    return (row - 0.5, col - 0.5)
def shift_func_3d(c):
    """Mapping for geometric_transform: shift 3-D coordinates by -0.5."""
    i, j, k = c[0], c[1], c[2]
    return (i - 0.5, j - 0.5, k - 0.5)
class NdimageInterpolation(Benchmark):
    """Time (and peak-memory profile) scipy.ndimage interpolation routines
    over 2-D and 3-D inputs, spline orders and boundary modes."""
    param_names = ['shape', 'order', 'mode']
    params = [
        [(64, 64), (512, 512), (2048, 2048), (16, 16, 16), (128, 128, 128)],
        [0, 1, 3, 5],
        ['mirror', 'constant']
    ]
    def setup(self, shape, order, mode):
        rstate = np.random.RandomState(5)
        self.x = rstate.standard_normal(shape)
        # Affine matrices: scale/shear plus translation column.
        self.matrix_2d = np.asarray([[0.8, 0, 1.5],
                                     [0, 1.2, -5.]])
        self.matrix_3d = np.asarray([[0.8, 0, 0, 1.5],
                                     [0, 1.2, 0, -5.],
                                     [0, 0, 1, 0]])
    def time_affine_transform(self, shape, order, mode):
        if self.x.ndim == 2:
            matrix = self.matrix_2d
        else:
            matrix = self.matrix_3d
        affine_transform(self.x, matrix, order=order, mode=mode)
    def time_rotate(self, shape, order, mode):
        rotate(self.x, 15, order=order, mode=mode)
    def time_shift(self, shape, order, mode):
        shift(self.x, (-2.5,) * self.x.ndim, order=order, mode=mode)
    def time_zoom(self, shape, order, mode):
        zoom(self.x, (1.3,) * self.x.ndim, order=order, mode=mode)
    def time_geometric_transform_mapping(self, shape, order, mode):
        # All parametrized shapes are 2-D or 3-D, so `mapping` is
        # always assigned by one of these branches.
        if self.x.ndim == 2:
            mapping = shift_func_2d
        if self.x.ndim == 3:
            mapping = shift_func_3d
        geometric_transform(self.x, mapping, order=order, mode=mode)
    def time_map_coordinates(self, shape, order, mode):
        # Sample every other grid point, offset by a fractional 0.3.
        coords = np.meshgrid(*[np.arange(0, s, 2) + 0.3 for s in self.x.shape])
        map_coordinates(self.x, coords, order=order, mode=mode)
    def peakmem_rotate(self, shape, order, mode):
        rotate(self.x, 15, order=order, mode=mode)
    def peakmem_shift(self, shape, order, mode):
        shift(self.x, 3, order=order, mode=mode)
| 2,227
| 31.289855
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/stats.py
|
import warnings
import numpy as np
from .common import Benchmark, safe_import, is_xslow
with safe_import():
import scipy.stats as stats
with safe_import():
from scipy.stats._distr_params import distcont, distdiscrete
try: # builtin lib
from itertools import compress
except ImportError:
pass
class Anderson_KSamp(Benchmark):
    """Benchmark the k-sample Anderson-Darling test on three samples."""

    def setup(self, *args):
        # Three normal samples with shifted means (0, 1, 2).
        self.rand = [np.random.normal(loc=shift, size=1000)
                     for shift in range(3)]

    def time_anderson_ksamp(self):
        # Suppress the UserWarning the test may emit while timing.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
    """Benchmark 2x2 exact contingency-table tests per ``alternative``.

    Fixes: the ``setup`` parameter was misleadingly named ``mode`` even
    though asv passes it the ``alternative`` value, and the timing methods
    bound their results to unused variables; both cleaned up.  Timed
    behavior is unchanged.
    """

    param_names = ['alternative']
    params = [
        ['two-sided', 'less', 'greater']
    ]

    def setup(self, alternative):
        # A random 2x2 contingency table; only runtime matters, not values.
        self.a = np.random.rand(2, 2) * 10

    def time_fisher_exact(self, alternative):
        stats.fisher_exact(self.a, alternative=alternative)

    def time_barnard_exact(self, alternative):
        stats.barnard_exact(self.a, alternative=alternative)

    def time_boschloo_exact(self, alternative):
        stats.boschloo_exact(self.a, alternative=alternative)
class ANOVAFunction(Benchmark):
    """Benchmark one-way ANOVA on three small groups, over both axes."""

    def setup(self):
        rng = np.random.default_rng(12345678)
        # Three 6x3 groups scaled to [0, 10); drawn in a fixed order so
        # the seeded stream yields reproducible data.
        self.a, self.b, self.c = (rng.random((6, 3)) * 10 for _ in range(3))

    def time_f_oneway(self):
        # Time both the default (axis=0) and the axis=1 code paths.
        stats.f_oneway(self.a, self.b, self.c)
        stats.f_oneway(self.a, self.b, self.c, axis=1)
class Kendalltau(Benchmark):
    """Benchmark kendalltau across nan policies, methods and variants."""

    param_names = ['nan_policy','method','variant']
    params = [
        ['propagate', 'raise', 'omit'],
        ['auto', 'asymptotic', 'exact'],
        ['b', 'c']
    ]

    def setup(self, nan_policy, method, variant):
        rng = np.random.default_rng(12345678)
        # Two independent random permutations of 0..199.
        first = np.arange(200)
        rng.shuffle(first)
        second = np.arange(200)
        rng.shuffle(second)
        self.a = first
        self.b = second

    def time_kendalltau(self, nan_policy, method, variant):
        stats.kendalltau(self.a, self.b, nan_policy=nan_policy,
                         method=method, variant=variant)
class KS(Benchmark):
    """Benchmark one- and two-sample Kolmogorov-Smirnov tests."""

    param_names = ['alternative', 'mode']
    params = [
        ['two-sided', 'less', 'greater'],
        ['auto', 'exact', 'asymp'],
    ]

    def setup(self, alternative, mode):
        rng = np.random.default_rng(0x2e7c964ff9a5cd6be22014c09f1dbba9)
        # Two normal samples with different means but the same scale.
        self.a = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
        self.b = stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)

    def time_ks_1samp(self, alternative, mode):
        stats.ks_1samp(self.a, stats.norm.cdf,
                       alternative=alternative, mode=mode)

    def time_ks_2samp(self, alternative, mode):
        stats.ks_2samp(self.a, self.b, alternative=alternative, mode=mode)
class RankSums(Benchmark):
    """Benchmark the Wilcoxon rank-sum test for each alternative."""

    param_names = ['alternative']
    params = [
        ['two-sided', 'less', 'greater']
    ]

    def setup(self, alternative):
        rng = np.random.default_rng(0xb6acd7192d6e5da0f68b5d8ab8ce7af2)
        # Overlapping uniform samples of unequal size.
        self.u1 = rng.uniform(-1, 1, 200)
        self.u2 = rng.uniform(-0.5, 1.5, 300)

    def time_ranksums(self, alternative):
        stats.ranksums(self.u1, self.u2, alternative=alternative)
class BrunnerMunzel(Benchmark):
    """Benchmark the Brunner-Munzel test over its option grid."""

    param_names = ['alternative', 'nan_policy', 'distribution']
    params = [
        ['two-sided', 'less', 'greater'],
        ['propagate', 'raise', 'omit'],
        ['t', 'normal']
    ]

    def setup(self, alternative, nan_policy, distribution):
        rng = np.random.default_rng(0xb82c4db22b2818bdbc5dbe15ad7528fe)
        # Overlapping uniform samples of unequal size.
        self.u1 = rng.uniform(-1, 1, 200)
        self.u2 = rng.uniform(-0.5, 1.5, 300)

    def time_brunnermunzel(self, alternative, nan_policy, distribution):
        stats.brunnermunzel(self.u1, self.u2, alternative=alternative,
                            distribution=distribution, nan_policy=nan_policy)
class InferentialStats(Benchmark):
    """Time a grab-bag of hypothesis tests on shared seeded samples."""
    def setup(self):
        rng = np.random.default_rng(0x13d756fadb635ae7f5a8d39bbfb0c931)
        # a/b share scale, a/c share mean with b but differ in scale.
        self.a = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
        self.b = stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
        self.c = stats.norm.rvs(loc=8, scale=20, size=500, random_state=rng)
        self.chisq = rng.integers(1, 20, 500)
    def time_ttest_ind_same_var(self):
        # test different sized sample with variances
        stats.ttest_ind(self.a, self.b)
        stats.ttest_ind(self.a, self.b, equal_var=False)
    def time_ttest_ind_diff_var(self):
        # test different sized sample with different variances
        stats.ttest_ind(self.a, self.c)
        stats.ttest_ind(self.a, self.c, equal_var=False)
    # NOTE(review): "chisqure" is a typo, but the name is kept because
    # renaming a benchmark would break its asv result history.
    def time_chisqure(self):
        stats.chisquare(self.chisq)
    def time_friedmanchisquare(self):
        stats.friedmanchisquare(self.a, self.b, self.c)
    def time_epps_singleton_2samp(self):
        stats.epps_singleton_2samp(self.a, self.b)
    def time_kruskal(self):
        stats.mstats.kruskal(self.a, self.b)
# Benchmark data for the truncnorm stats() method.
# The data in each row is:
# a, b, mean, variance, skewness, excess kurtosis. Generated using
# https://gist.github.com/WarrenWeckesser/636b537ee889679227d53543d333a720
truncnorm_cases = [[-20, -19, -19.052343945976656, 0.002725073018195613,
                    -1.9838693623377885, 5.871801893091683],
                   [-30, -29, -29.034401237736176, 0.0011806604886186853,
                    -1.9929615171469608, 5.943905539773037],
                   [-40, -39, -39.02560741993011, 0.0006548827702932775,
                    -1.9960847672775606, 5.968744357649675],
                   [39, 40, 39.02560741993011, 0.0006548827702932775,
                    1.9960847672775606, 5.968744357649675]]
# Convert to an array so TruncnormStats can use 2-D fancy indexing.
truncnorm_cases = np.array(truncnorm_cases)
class TruncnormStats(Benchmark):
    """Track relative error of truncnorm.stats against reference values."""

    param_names = ['case', 'moment']
    params = [list(range(len(truncnorm_cases))), ['m', 'v', 's', 'k']]

    def track_truncnorm_stats_error(self, case, moment):
        # Columns 2..5 of a row hold mean, variance, skew, excess kurtosis.
        column = {'m': 2, 'v': 3, 's': 4, 'k': 5}[moment]
        ref = truncnorm_cases[case, column]
        a, b = truncnorm_cases[case, 0:2]
        res = stats.truncnorm(a, b).stats(moments=moment)
        return np.abs((res - ref) / ref)
class DistributionsAll(Benchmark):
    """Time every public method of every continuous/discrete distribution.

    Bug fix: the ppf/isf branch previously called
    ``np.linspace((0, 1), 100)``, which treats ``(0, 1)`` as `start` and
    ``100`` as `stop`, producing a (50, 2) array with values up to 100 --
    invalid probabilities that make ppf/isf return NaN.  It now evaluates
    100 evenly spaced probabilities on [0, 1].
    """
    # all distributions are in this list. A conversion to a set is used to
    # remove duplicates that appear more than once in either `distcont` or
    # `distdiscrete`.
    dists = sorted(list(set([d[0] for d in distcont + distdiscrete])))
    param_names = ['dist_name', 'method']
    params = [
        dists, ['pdf/pmf', 'logpdf/logpmf', 'cdf', 'logcdf', 'rvs', 'fit',
                'sf', 'logsf', 'ppf', 'isf', 'moment', 'stats_s', 'stats_v',
                'stats_m', 'stats_k', 'stats_mvsk', 'entropy']
    ]
    # stats_mvsk is tested separately because of gh-11742
    # `moment` tests a higher moment (order 5)
    dist_data = dict(distcont + distdiscrete)
    # custom shape values can be provided for any distribution in the format
    # `dist_name`: [shape1, shape2, ...]
    custom_input = {}
    # these are the distributions that are the slowest
    slow_dists = ['nct', 'ncx2', 'argus', 'cosine', 'foldnorm', 'gausshyper',
                  'kappa4', 'invgauss', 'wald', 'vonmises_line', 'ksone',
                  'genexpon', 'exponnorm', 'recipinvgauss', 'vonmises',
                  'foldcauchy', 'kstwo', 'levy_stable', 'skewnorm']
    slow_methods = ['moment']

    def setup(self, dist_name, method):
        # Slow cases only run when SCIPY_XSLOW is set.
        if not is_xslow() and (dist_name in self.slow_dists
                               or method in self.slow_methods):
            raise NotImplementedError("Skipped")
        self.dist = getattr(stats, dist_name)
        dist_shapes = self.dist_data[dist_name]
        if isinstance(self.dist, stats.rv_discrete):
            # discrete distributions only use location
            self.isCont = False
            kwds = {'loc': 4}
        else:
            # continuous distributions use location and scale
            self.isCont = True
            kwds = {'loc': 4, 'scale': 10}
        # Evaluate over the central 99% of the distribution's support.
        bounds = self.dist.interval(.99, *dist_shapes, **kwds)
        x = np.linspace(*bounds, 100)
        args = [x, *self.custom_input.get(dist_name, dist_shapes)]
        self.args = args
        self.kwds = kwds

        if method == 'fit':
            # there are no fit methods for discrete distributions
            if isinstance(self.dist, stats.rv_discrete):
                raise NotImplementedError("This attribute is not a member "
                                          "of the distribution")
            # the only positional argument is the data to be fitted
            self.args = [self.dist.rvs(*dist_shapes, size=100, random_state=0, **kwds)]
        elif method == 'rvs':
            # add size keyword argument for data creation
            kwds['size'] = 1000
            kwds['random_state'] = 0
            # keep shapes as positional arguments, omit linearly spaced data
            self.args = args[1:]
        elif method == 'pdf/pmf':
            method = ('pmf' if isinstance(self.dist, stats.rv_discrete)
                      else 'pdf')
        elif method == 'logpdf/logpmf':
            method = ('logpmf' if isinstance(self.dist, stats.rv_discrete)
                      else 'logpdf')
        elif method in ['ppf', 'isf']:
            # 100 probabilities spanning [0, 1] (see class docstring for
            # the bug this replaces).
            self.args = [np.linspace(0, 1, 100), *args[1:]]
        elif method == 'moment':
            # the first four moments may be optimized, so compute the fifth
            self.args = [5, *args[1:]]
        elif method.startswith('stats_'):
            # e.g. 'stats_mvsk' -> stats(..., moments='mvsk')
            kwds['moments'] = method[6:]
            method = 'stats'
            self.args = args[1:]
        elif method == 'entropy':
            self.args = args[1:]
        self.method = getattr(self.dist, method)

    def time_distribution(self, dist_name, method):
        self.method(*self.args, **self.kwds)
class TrackContinuousRoundtrip(Benchmark):
    """Track ppf/cdf and isf/sf round-trip accuracy per distribution."""
    # Benchmarks that track a value for every distribution can go here
    param_names = ['dist_name']
    params = list(dict(distcont).keys())
    dist_data = dict(distcont)
    def setup(self, dist_name):
        # Distribution setup follows `DistributionsAll` benchmark.
        # This focuses on ppf, so the code for handling other functions is
        # removed for simplicity.
        self.dist = getattr(stats, dist_name)
        self.shape_args = self.dist_data[dist_name]
    def track_distribution_ppf_roundtrip(self, dist_name):
        # Tracks the worst relative error of a
        # couple of round-trip ppf -> cdf calculations.
        vals = [0.001, 0.5, 0.999]
        ppf = self.dist.ppf(vals, *self.shape_args)
        round_trip = self.dist.cdf(ppf, *self.shape_args)
        err_rel = np.abs(vals - round_trip) / vals
        return np.max(err_rel)
    def track_distribution_ppf_roundtrip_extrema(self, dist_name):
        # Tracks the absolute error of an "extreme" round-trip
        # ppf -> cdf calculation.
        v = 1e-6
        ppf = self.dist.ppf(v, *self.shape_args)
        round_trip = self.dist.cdf(ppf, *self.shape_args)
        err_abs = np.abs(v - round_trip)
        return err_abs
    def track_distribution_isf_roundtrip(self, dist_name):
        # Tracks the worst relative error of a
        # couple of round-trip isf -> sf calculations.
        vals = [0.001, 0.5, 0.999]
        isf = self.dist.isf(vals, *self.shape_args)
        round_trip = self.dist.sf(isf, *self.shape_args)
        err_rel = np.abs(vals - round_trip) / vals
        return np.max(err_rel)
    def track_distribution_isf_roundtrip_extrema(self, dist_name):
        # Tracks the absolute error of an "extreme" round-trip
        # isf -> sf calculation.
        # (local name `ppf` actually holds an isf result here)
        v = 1e-6
        ppf = self.dist.isf(v, *self.shape_args)
        round_trip = self.dist.sf(ppf, *self.shape_args)
        err_abs = np.abs(v - round_trip)
        return err_abs
class PDFPeakMemory(Benchmark):
    """Track peak memory of pdf evaluation over a million-point array.

    Fix: corrected typos in the user-facing skip message
    ("enviroment" -> "environment", "benchamark" -> "benchmark").
    """
    # Tracks peak memory when a distribution is given a large array to process
    # See gh-14095
    # Run for up to 30 min - some dists are quite slow.
    timeout = 1800.0

    x = np.arange(1e6)

    param_names = ['dist_name']
    params = list(dict(distcont).keys())
    dist_data = dict(distcont)

    # So slow that 30min isn't enough time to finish.
    slow_dists = ["levy_stable"]

    def setup(self, dist_name):
        # This benchmark is demanding. Skip it if the env isn't xslow.
        if not is_xslow():
            raise NotImplementedError("skipped - environment is not xslow. "
                                      "To enable this benchmark, set the "
                                      "environment variable SCIPY_XSLOW=1")
        if dist_name in self.slow_dists:
            raise NotImplementedError("skipped - dist is too slow.")
        self.dist = getattr(stats, dist_name)
        self.shape_args = self.dist_data[dist_name]

    def peakmem_bigarr_pdf(self, dist_name):
        self.dist.pdf(self.x, *self.shape_args)
class Distribution(Benchmark):
    """Legacy benchmark timing pdf/cdf/rvs/fit for three distributions."""
    # though there is a new version of this benchmark that runs all the
    # distributions, at the time of writing there was odd behavior on
    # the asv for this benchmark, so it is retained.
    # https://pv.github.io/scipy-bench/#stats.Distribution.time_distribution
    param_names = ['distribution', 'properties']
    params = [
        ['cauchy', 'gamma', 'beta'],
        ['pdf', 'cdf', 'rvs', 'fit']
    ]
    def setup(self, distribution, properties):
        rng = np.random.default_rng(12345678)
        self.x = rng.random(100)
    def time_distribution(self, distribution, properties):
        # Explicit dispatch; kept as-is because the version hash below
        # pins this benchmark's result history.
        if distribution == 'gamma':
            if properties == 'pdf':
                stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
            elif properties == 'cdf':
                stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
            elif properties == 'rvs':
                stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
            elif properties == 'fit':
                stats.gamma.fit(self.x, loc=4, scale=10)
        elif distribution == 'cauchy':
            if properties == 'pdf':
                stats.cauchy.pdf(self.x, loc=4, scale=10)
            elif properties == 'cdf':
                stats.cauchy.cdf(self.x, loc=4, scale=10)
            elif properties == 'rvs':
                stats.cauchy.rvs(size=1000, loc=4, scale=10)
            elif properties == 'fit':
                stats.cauchy.fit(self.x, loc=4, scale=10)
        elif distribution == 'beta':
            if properties == 'pdf':
                stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
            elif properties == 'cdf':
                stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
            elif properties == 'rvs':
                stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
            elif properties == 'fit':
                stats.beta.fit(self.x, loc=4, scale=10)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
    """Time descriptive statistics (currently ``stats.mode``)."""
    param_names = ['n_levels']
    params = [[10, 1000]]

    def setup(self, n_levels):
        gen = np.random.default_rng(12345678)
        self.levels = gen.integers(n_levels, size=(1000, 10))

    def time_mode(self, n_levels):
        stats.mode(self.levels, axis=0)
class GaussianKDE(Benchmark):
    """Time evaluation of a 2-D Gaussian kernel density estimate."""
    param_names = ['points']
    params = [10, 6400]

    def setup(self, points):
        self.length = points
        gen = np.random.default_rng(12345678)
        n = 2000
        m1 = gen.normal(size=n)
        m2 = gen.normal(scale=0.5, size=n)

        # 80x80 evaluation grid spanning the sample range.
        X, Y = np.mgrid[m1.min():m1.max():80j, m2.min():m2.max():80j]
        self.positions = np.vstack([X.ravel(), Y.ravel()])
        self.kernel = stats.gaussian_kde(np.vstack([m1, m2]))

    def time_gaussian_kde_evaluate(self, length):
        self.kernel(self.positions[:, :self.length])

    def time_gaussian_kde_logpdf(self, length):
        self.kernel.logpdf(self.positions[:, :self.length])
class GroupSampling(Benchmark):
    """Time random sampling from matrix groups of dimension ``dim``."""
    param_names = ['dim']
    params = [[3, 10, 50, 200]]
    def setup(self, dim):
        # Fresh generator per benchmark instance for reproducibility.
        self.rng = np.random.default_rng(12345678)
    def time_unitary_group(self, dim):
        stats.unitary_group.rvs(dim, random_state=self.rng)
    def time_ortho_group(self, dim):
        stats.ortho_group.rvs(dim, random_state=self.rng)
    def time_special_ortho_group(self, dim):
        stats.special_ortho_group.rvs(dim, random_state=self.rng)
class BinnedStatisticDD(Benchmark):
    """Time ``stats.binned_statistic_dd`` with and without reusing bins."""
    params = ["count", "sum", "mean", "min", "max", "median", "std", np.std]
    def setup(self, statistic):
        rng = np.random.default_rng(12345678)
        self.inp = rng.random(9999).reshape(3, 3333) * 200
        self.subbin_x_edges = np.arange(0, 200, dtype=np.float32)
        self.subbin_y_edges = np.arange(0, 200, dtype=np.float64)
        # Precomputed result reused by the *_reuse_bin benchmark below.
        self.ret = stats.binned_statistic_dd(
            [self.inp[0], self.inp[1]], self.inp[2], statistic=statistic,
            bins=[self.subbin_x_edges, self.subbin_y_edges])
    def time_binned_statistic_dd(self, statistic):
        stats.binned_statistic_dd(
            [self.inp[0], self.inp[1]], self.inp[2], statistic=statistic,
            bins=[self.subbin_x_edges, self.subbin_y_edges])
    def time_binned_statistic_dd_reuse_bin(self, statistic):
        stats.binned_statistic_dd(
            [self.inp[0], self.inp[1]], self.inp[2], statistic=statistic,
            binned_statistic_result=self.ret)
class ContinuousFitAnalyticalMLEOverride(Benchmark):
    """Time ``fit`` for distributions with analytical MLE overrides.

    Each benchmark case fixes some subset of (loc, scale, shapes) and times
    fitting the remaining free parameters.
    """
    # list of distributions to time
    dists = ["pareto", "laplace", "rayleigh", "invgauss", "gumbel_r",
             "gumbel_l", "powerlaw", "lognorm"]
    # add custom values for rvs and fit, if desired, for any distribution:
    # key should match name in dists and value should be list of loc, scale,
    # and shapes
    custom_input = {}
    fnames = ['floc', 'fscale', 'f0', 'f1', 'f2']
    fixed = {}
    param_names = ["distribution", "case", "loc_fixed", "scale_fixed",
                   "shape1_fixed", "shape2_fixed", "shape3_fixed"]
    # in the `_distr_params.py` list, some distributions have multiple sets of
    # "sane" shape combinations. `case` needs to be an enumeration of the
    # maximum number of cases for a benchmarked distribution; the maximum is
    # currently two. Should a benchmarked distribution have more cases in the
    # `_distr_params.py` list, this will need to be increased.
    params = [dists, range(2), * [[True, False]] * 5]
    def setup(self, dist_name, case, loc_fixed, scale_fixed,
              shape1_fixed, shape2_fixed, shape3_fixed):
        self.distn = eval("stats." + dist_name)
        # default `loc` and `scale` are .834 and 4.342, and shapes are from
        # `_distr_params.py`. If there are multiple cases of valid shapes in
        # `distcont`, they are benchmarked separately.
        default_shapes_n = [s[1] for s in distcont if s[0] == dist_name]
        if case >= len(default_shapes_n):
            raise NotImplementedError("no alternate case for this dist")
        default_shapes = default_shapes_n[case]
        param_values = self.custom_input.get(dist_name, [*default_shapes,
                                                         .834, 4.342])
        # separate relevant and non-relevant parameters for this distribution
        # based on the number of shapes
        nparam = len(param_values)
        all_parameters = [loc_fixed, scale_fixed, shape1_fixed, shape2_fixed,
                          shape3_fixed]
        relevant_parameters = all_parameters[:nparam]
        nonrelevant_parameters = all_parameters[nparam:]
        # skip if all parameters are fixed or if non relevant parameters are
        # not all false
        if True in nonrelevant_parameters or False not in relevant_parameters:
            raise NotImplementedError("skip non-relevant case")
        # add fixed values if fixed in relevant_parameters to self.fixed
        # with keys from self.fnames and values in the same order as `fnames`.
        # NOTE(review): `fixed_vales` lists loc/scale first while
        # `param_values` lists shapes first; confirm the ordering against
        # `fnames` is intentional.
        fixed_vales = self.custom_input.get(dist_name, [.834, 4.342,
                                                        *default_shapes])
        self.fixed = dict(zip(compress(self.fnames, relevant_parameters),
                              compress(fixed_vales, relevant_parameters)))
        self.param_values = param_values
        # shapes need to come before loc and scale
        self.data = self.distn.rvs(*param_values[2:], *param_values[:2],
                                   size=1000,
                                   random_state=np.random.default_rng(4653465))
    def time_fit(self, dist_name, case, loc_fixed, scale_fixed,
                 shape1_fixed, shape2_fixed, shape3_fixed):
        self.distn.fit(self.data, **self.fixed)
class BenchMoment(Benchmark):
    """Time ``stats.moment`` for several orders and sample sizes."""
    params = [
        [1, 2, 3, 8],
        [100, 1000, 10000],
    ]
    param_names = ["order", "size"]

    def setup(self, order, size):
        # Bug fix: this previously called np.random.random(1234), which
        # draws 1234 variates but does not seed the generator; seed it so
        # the benchmark data is reproducible across runs.
        np.random.seed(1234)
        self.x = np.random.random(size)

    def time_moment(self, order, size):
        stats.moment(self.x, order)
class BenchSkewKurtosis(Benchmark):
    """Time ``stats.skew`` and ``stats.kurtosis`` with/without bias."""
    params = [
        [1, 2, 3, 8],
        [100, 1000, 10000],
        [False, True]
    ]
    param_names = ["order", "size", "bias"]

    def setup(self, order, size, bias):
        # Bug fix: this previously called np.random.random(1234), which
        # draws 1234 variates but does not seed the generator; seed it so
        # the benchmark data is reproducible across runs.
        np.random.seed(1234)
        self.x = np.random.random(size)

    def time_skew(self, order, size, bias):
        stats.skew(self.x, bias=bias)

    def time_kurtosis(self, order, size, bias):
        stats.kurtosis(self.x, bias=bias)
class BenchQMCDiscrepancy(Benchmark):
    """Time ``stats.qmc.discrepancy`` for each supported method."""
    param_names = ['method']
    params = [["CD", "WD", "MD", "L2-star"]]

    def setup(self, method):
        gen = np.random.default_rng(1234)
        self.sample = gen.random((1000, 10))

    def time_discrepancy(self, method):
        stats.qmc.discrepancy(self.sample, method=method)
class BenchQMCHalton(Benchmark):
    """Time drawing ``n`` points from a (scrambled) Halton sequence."""
    param_names = ['d', 'scramble', 'n', 'workers']
    params = [
        [1, 10],
        [True, False],
        [10, 1_000, 100_000],
        [1, 4],
    ]

    def setup(self, d, scramble, n, workers):
        self.rng = np.random.default_rng(1234)

    def time_halton(self, d, scramble, n, workers):
        engine = stats.qmc.Halton(d, scramble=scramble, seed=self.rng)
        engine.random(n, workers=workers)
class BenchQMCSobol(Benchmark):
    """Time drawing ``2**base2`` points from a Sobol' sequence."""
    param_names = ['d', 'base2']
    params = [
        [1, 50, 100],
        [3, 10, 11, 12],
    ]

    def setup(self, d, base2):
        self.rng = np.random.default_rng(168525179735951991038384544)
        stats.qmc.Sobol(1, bits=32)  # make it load direction numbers

    def time_sobol(self, d, base2):
        # scrambling is happening at init only, not worth checking
        engine = stats.qmc.Sobol(d, scramble=False, bits=32, seed=self.rng)
        engine.random_base2(base2)
class DistanceFunctions(Benchmark):
    """Time statistical distances between two weighted samples."""
    param_names = ['n_size']
    params = [[10, 4000]]

    def setup(self, n_size):
        gen = np.random.default_rng(12345678)
        # Second sample is half the size of the first.
        self.u_values = gen.random(n_size) * 10
        self.u_weights = gen.random(n_size) * 10
        self.v_values = gen.random(n_size // 2) * 10
        self.v_weights = gen.random(n_size // 2) * 10

    def time_energy_distance(self, n_size):
        stats.energy_distance(self.u_values, self.v_values,
                              self.u_weights, self.v_weights)

    def time_wasserstein_distance(self, n_size):
        stats.wasserstein_distance(self.u_values, self.v_values,
                                   self.u_weights, self.v_weights)
class Somersd(Benchmark):
    """Time ``stats.somersd`` on random ordinal data."""
    param_names = ['n_size']
    params = [[10, 100]]

    def setup(self, n_size):
        gen = np.random.default_rng(12345678)
        self.x = gen.choice(n_size, size=n_size)
        self.y = gen.choice(n_size, size=n_size)

    def time_somersd(self, n_size):
        stats.somersd(self.x, self.y)
class KolmogorovSmirnov(Benchmark):
    """Time the one-sample Kolmogorov-Smirnov test (``stats.kstest``)."""
    param_names = ['alternative', 'mode', 'size']
    # No auto since it defaults to exact for 20 samples
    params = [
        ['two-sided', 'less', 'greater'],
        ['exact', 'approx', 'asymp'],
        [19, 20, 21]
    ]
    def setup(self, alternative, mode, size):
        np.random.seed(12345678)
        # NOTE(review): the sample always has length 20 regardless of the
        # `size` parameter; `size` is only forwarded as `N` to `kstest`
        # below. Confirm this is intentional.
        a = stats.norm.rvs(size=20)
        self.a = a
    def time_ks(self, alternative, mode, size):
        stats.kstest(self.a, 'norm', alternative=alternative,
                     mode=mode, N=size)
class KolmogorovSmirnovTwoSamples(Benchmark):
    """Time the two-sample Kolmogorov-Smirnov test (``stats.ks_2samp``)."""
    param_names = ['alternative', 'mode', 'size']
    # No auto since it defaults to exact for 20 samples
    params = [
        ['two-sided', 'less', 'greater'],
        ['exact', 'asymp'],
        [(21, 20), (20, 20)]
    ]

    def setup(self, alternative, mode, size):
        np.random.seed(12345678)
        n_a, n_b = size
        self.a = stats.norm.rvs(size=n_a)
        self.b = stats.norm.rvs(size=n_b)

    def time_ks2(self, alternative, mode, size):
        stats.ks_2samp(self.a, self.b, alternative=alternative, mode=mode)
class RandomTable(Benchmark):
    """Time sampling contingency tables with fixed marginals."""
    param_names = ["method", "ntot", "ncell"]
    params = [
        ["boyett", "patefield"],
        [10, 100, 1000, 10000],
        [4, 64, 256, 1024],
    ]

    def setup(self, method, ntot, ncell):
        self.rng = np.random.default_rng(12345678)
        k = int(ncell ** 0.5)
        assert k ** 2 == ncell
        # Random row/column marginals, each summing to ntot.
        p = np.ones(k) / k
        row = self.rng.multinomial(ntot, p)
        col = self.rng.multinomial(ntot, p)
        self.dist = stats.random_table(row, col)

    def time_method(self, method, ntot, ncell):
        self.dist.rvs(1000, method=method, random_state=self.rng)
| 26,280
| 34.371467
| 98
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/peak_finding.py
|
"""Benchmarks for peak finding related functions."""
from .common import Benchmark, safe_import
with safe_import():
from scipy.signal import find_peaks, peak_prominences, peak_widths
from scipy.datasets import electrocardiogram
class FindPeaks(Benchmark):
    """Benchmark `scipy.signal.find_peaks`.
    Notes
    -----
    The first value of `distance` is None in which case the benchmark shows
    the actual speed of the underlying maxima finding function.
    """
    param_names = ['distance']
    params = [[None, 8, 64, 512, 4096]]
    def setup(self, distance):
        # Example ECG signal from scipy.datasets.
        self.x = electrocardiogram()
    def time_find_peaks(self, distance):
        find_peaks(self.x, distance=distance)
class PeakProminences(Benchmark):
    """Benchmark `scipy.signal.peak_prominences`."""
    param_names = ['wlen']
    params = [[None, 8, 64, 512, 4096]]
    def setup(self, wlen):
        self.x = electrocardiogram()
        # Peaks are located once in setup; only prominence calc is timed.
        self.peaks = find_peaks(self.x)[0]
    def time_peak_prominences(self, wlen):
        peak_prominences(self.x, self.peaks, wlen)
class PeakWidths(Benchmark):
    """Benchmark `scipy.signal.peak_widths`."""
    param_names = ['rel_height']
    params = [[0, 0.25, 0.5, 0.75, 1]]
    def setup(self, rel_height):
        self.x = electrocardiogram()
        self.peaks = find_peaks(self.x)[0]
        # Precompute prominences so only the width calculation is timed.
        self.prominence_data = peak_prominences(self.x, self.peaks)
    def time_peak_widths(self, rel_height):
        peak_widths(self.x, self.peaks, rel_height, self.prominence_data)
| 1,523
| 26.214286
| 75
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/optimize_milp.py
|
import os
import numpy as np
from numpy.testing import assert_allclose
from .common import Benchmark, safe_import
with safe_import():
from scipy.optimize import milp
with safe_import():
from scipy.optimize.tests.test_linprog import magic_square
# MIPLIB 2017 benchmarks included with permission of the authors
# The MIPLIB benchmark problem set was downloaded from https://miplib.zib.de/.
# An MPS converter (scikit-glpk) was used to load the data into Python. The
# arrays were arranged to the format required by `milp` and saved to `npz`
# format using `np.savez`.
milp_problems = ["piperout-27"]
class MilpMiplibBenchmarks(Benchmark):
    """Time ``milp`` on MIPLIB problems stored in an ``npz`` archive."""
    params = [milp_problems]
    param_names = ['problem']
    def setup(self, prob):
        # Load the archive once and cache it on the instance.
        if not hasattr(self, 'data'):
            dir_path = os.path.dirname(os.path.realpath(__file__))
            datafile = os.path.join(dir_path, "linprog_benchmark_files",
                                    "milp_benchmarks.npz")
            self.data = np.load(datafile, allow_pickle=True)
        # Each archive entry packs the problem in linprog-style arrays.
        c, A_ub, b_ub, A_eq, b_eq, bounds, integrality = self.data[prob]
        lb = [li for li, ui in bounds]
        ub = [ui for li, ui in bounds]
        # Translate to milp-style (A, lower, upper) constraint triples.
        cons = []
        if A_ub is not None:
            cons.append((A_ub, -np.inf, b_ub))
        if A_eq is not None:
            cons.append((A_eq, b_eq, b_eq))
        self.c = c
        self.constraints = cons
        self.bounds = (lb, ub)
        self.integrality = integrality
    def time_milp(self, prob):
        res = milp(c=self.c, constraints=self.constraints, bounds=self.bounds,
                   integrality=self.integrality)
        assert res.success
class MilpMagicSquare(Benchmark):
    """Time a ``milp`` feasibility solve of the magic-square problem."""
    params = [[3, 4, 5, 6]]
    param_names = ['size']

    def setup(self, n):
        A_eq, b_eq, self.c, self.numbers, self.M = magic_square(n)
        self.constraints = (A_eq, b_eq, b_eq)

    def time_magic_square(self, n):
        # Zero objective: we only care about finding a feasible square.
        res = milp(c=self.c*0, constraints=self.constraints,
                   bounds=(0, 1), integrality=True)
        assert res.status == 0
        chosen = np.round(res.x)
        board = np.sum((self.numbers.flatten() * chosen).reshape(n**2, n, n),
                       axis=0)
        # Every row, column and both diagonals must sum to the magic constant.
        assert_allclose(board.sum(axis=0), self.M)
        assert_allclose(board.sum(axis=1), self.M)
        assert_allclose(np.diag(board).sum(), self.M)
        assert_allclose(np.diag(board[:, ::-1]).sum(), self.M)
| 2,427
| 30.947368
| 78
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/linalg_solve_toeplitz.py
|
"""Benchmark the solve_toeplitz solver (Levinson recursion)
"""
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg
class SolveToeplitz(Benchmark):
    """Time Levinson-recursion Toeplitz solve against a generic solve."""
    params = (
        ('float64', 'complex128'),
        (100, 300, 1000),
        ('toeplitz', 'generic')
    )
    param_names = ('dtype', 'n', 'solver')

    def setup(self, dtype, n, soltype):
        rs = np.random.RandomState(1234)
        dtype = np.dtype(dtype)

        # Sample a random Toeplitz matrix representation and rhs.
        c = rs.randn(n)
        r = rs.randn(n)
        y = rs.randn(n)
        if dtype == np.complex128:
            c = c + 1j*rs.rand(n)
            r = r + 1j*rs.rand(n)
            y = y + 1j*rs.rand(n)

        self.c = c
        self.r = r
        self.y = y
        # Dense counterpart used by the 'generic' solver.
        self.T = scipy.linalg.toeplitz(c, r=r)

    def time_solve_toeplitz(self, dtype, n, soltype):
        if soltype == 'toeplitz':
            scipy.linalg.solve_toeplitz((self.c, self.r), self.y)
        else:
            scipy.linalg.solve(self.T, self.y)
| 1,102
| 25.261905
| 65
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse.py
|
"""
Simple benchmarks for the sparse module
"""
import warnings
import time
import timeit
import pickle
import numpy
import numpy as np
from numpy import ones, array, asarray, empty
from .common import Benchmark, safe_import
with safe_import():
from scipy import sparse
from scipy.sparse import (coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
def random_sparse(m, n, nnz_per_row):
    """Return a random m-by-n CSR matrix with ``nnz_per_row`` draws per row.

    Column indices are drawn with replacement, so duplicate entries are
    summed by the COO->CSR conversion and the stored nnz may be smaller.
    """
    row_idx = numpy.repeat(numpy.arange(m), nnz_per_row)
    col_idx = numpy.random.randint(0, n, size=nnz_per_row*m)
    values = numpy.random.random_sample(m*nnz_per_row)
    return coo_matrix((values, (row_idx, col_idx)), (m, n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
    """
    Return a sparse matrix for the 2D Poisson problem
    with standard 5-point finite difference stencil on a
    square N-by-N grid.
    """
    if N == 1:
        main = asarray([[4]], dtype=dtype)
        return dia_matrix((main, [0]), shape=(1, 1)).asformat(format)

    n2 = N**2
    bands = empty((5, n2), dtype=dtype)
    bands[0] = 4    # main diagonal
    bands[1:] = -1  # all offdiagonals
    # Zero the entries that would couple the last/first node of adjacent
    # grid rows through the +-1 diagonals.
    bands[3, N-1::N] = 0  # first lower diagonal
    bands[4, N::N] = 0    # first upper diagonal

    offsets = array([0, -N, N, -1, 1])
    return dia_matrix((bands, offsets), shape=(n2, n2)).asformat(format)
class Arithmetic(Benchmark):
    """Time binary arithmetic between two sparse Poisson matrices."""
    param_names = ['format', 'XY', 'op']
    params = [
        ['csr', 'csc', 'coo', 'dia'],
        ['AA', 'AB', 'BA', 'BB'],
        ['__add__', '__sub__', 'multiply', '__mul__']
    ]

    def setup(self, format, XY, op):
        A = poisson2d(250, format=format)
        operands = dict(A=A, B=A**2)
        left = operands[XY[0]]
        self.y = operands[XY[1]]
        self.fn = getattr(left, op)
        self.fn(self.y)  # warmup

    def time_arithmetic(self, format, XY, op):
        self.fn(self.y)
class Sort(Benchmark):
    """Time sorting of CSR column indices."""
    params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
    param_names = ['matrix']

    def setup(self, matrix):
        n = 10000
        if not matrix.startswith('Rand'):
            raise NotImplementedError()
        k = int(matrix[4:])
        self.A = random_sparse(n, n, k)
        # Force the sort to actually do work.
        self.A.has_sorted_indices = False
        self.A.indices[:2] = 2, 1

    def time_sort(self, matrix):
        """sort CSR column indices"""
        self.A.sort_indices()
class Matvec(Benchmark):
    """Time sparse matrix-vector products for several matrices/formats."""
    params = [
        ['Identity', 'Poisson5pt', 'Block2x2', 'Block3x3'],
        ['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr']
    ]
    param_names = ['matrix', 'format']

    def setup(self, matrix, format):
        if matrix == 'Identity':
            if format in ('lil', 'dok'):
                raise NotImplementedError()
            self.A = sparse.eye(10000, 10000, format=format)
        elif matrix == 'Poisson5pt':
            self.A = poisson2d(300, format=format)
        elif matrix in ('Block2x2', 'Block3x3'):
            if format not in ('csr', 'bsr'):
                raise NotImplementedError()
            # 'Block2x2' -> 2x2 blocks over a 150x150 grid,
            # 'Block3x3' -> 3x3 blocks over a 100x100 grid.
            side = int(matrix[-1])
            b = (side, side)
            self.A = sparse.kron(poisson2d(300 // side),
                                 ones(b)).tobsr(blocksize=b).asformat(format)
        else:
            raise NotImplementedError()

        self.x = ones(self.A.shape[1], dtype=float)

    def time_matvec(self, matrix, format):
        self.A * self.x
class Matvecs(Benchmark):
    """Time sparse matrix times dense multivector (n x 10) products."""
    params = ['dia', 'coo', 'csr', 'csc', 'bsr']
    param_names = ["format"]
    def setup(self, format):
        self.A = poisson2d(300, format=format)
        self.x = ones((self.A.shape[1], 10), dtype=self.A.dtype)
    def time_matvecs(self, format):
        self.A * self.x
class Matmul(Benchmark):
    """Time repeated products of a 1-row matrix with a large CSR matrix."""

    @staticmethod
    def _random_csr(rng, nrows, ncols, nnz):
        # Sample nnz (row, col, value) triples; duplicates are summed by
        # the COO->CSR conversion.
        i = rng.integers(nrows, size=nnz)
        j = rng.integers(ncols, size=nnz)
        data = rng.random(nnz)
        return coo_matrix((data, (i, j)), shape=(nrows, ncols)).tocsr()

    def setup(self):
        H1, W1 = 1, 100000
        H2, W2 = W1, 1000
        C1 = 10
        C2 = 1000000
        rng = np.random.default_rng(0)
        self.matrix1 = self._random_csr(rng, H1, W1, C1)
        self.matrix2 = self._random_csr(rng, H2, W2, C2)

    def time_large(self):
        for i in range(100):
            self.matrix1 * self.matrix2

    # Retain old benchmark results (remove this if changing the benchmark)
    time_large.version = "33aee08539377a7cb0fabaf0d9ff9d6d80079a428873f451b378c39f6ead48cb"
class Construction(Benchmark):
    """Time element-by-element construction of lil/dok matrices."""
    params = [
        ['Empty', 'Identity', 'Poisson5pt'],
        ['lil', 'dok']
    ]
    param_names = ['matrix', 'format']

    def setup(self, name, format):
        if name == 'Empty':
            self.A = coo_matrix((10000, 10000))
        elif name == 'Identity':
            self.A = sparse.eye(10000, format='coo')
        else:
            self.A = poisson2d(100, format='coo')

        self.cls = {'lil': lil_matrix, 'dok': dok_matrix}[format]

    def time_construction(self, name, format):
        T = self.cls(self.A.shape)
        for i, j, v in zip(self.A.row, self.A.col, self.A.data):
            T[i, j] = v
class BlockDiagDenseConstruction(Benchmark):
    """Time ``sparse.block_diag`` over many small dense blocks."""
    param_names = ['num_matrices']
    params = [1000, 5000, 10000, 15000, 20000]

    def setup(self, num_matrices):
        self.matrices = []
        for _ in range(num_matrices):
            rows = np.random.randint(1, 4)
            columns = np.random.randint(1, 4)
            self.matrices.append(np.random.randint(0, 10, (rows, columns)))

    def time_block_diag(self, num_matrices):
        sparse.block_diag(self.matrices)
class BlockDiagSparseConstruction(Benchmark):
    """Time ``sparse.block_diag`` over many random-shaped dense blocks."""
    param_names = ['num_matrices']
    params = [100, 500, 1000, 1500, 2000]

    def setup(self, num_matrices):
        self.matrices = []
        for _ in range(num_matrices):
            rows = np.random.randint(1, 20)
            columns = np.random.randint(1, 20)
            self.matrices.append(np.random.randint(0, 10, (rows, columns)))

    def time_block_diag(self, num_matrices):
        sparse.block_diag(self.matrices)
class CsrHstack(Benchmark):
    """Time horizontal stacking of a large CSR matrix with itself."""
    param_names = ['num_rows']
    params = [10000, 25000, 50000, 100000, 250000]

    def setup(self, num_rows):
        num_cols = int(1e5)
        density = 2e-3
        per_row = int(density*num_cols)
        self.mat = random_sparse(num_rows, num_cols, per_row)

    def time_csr_hstack(self, num_rows):
        sparse.hstack([self.mat, self.mat])
class Conversion(Benchmark):
    """Time format-to-format conversion of a sparse Poisson matrix."""
    params = [
        ['csr', 'csc', 'coo', 'dia', 'lil', 'dok', 'bsr'],
        ['csr', 'csc', 'coo', 'dia', 'lil', 'dok', 'bsr'],
    ]
    param_names = ['from_format', 'to_format']

    def setup(self, fromfmt, tofmt):
        base = poisson2d(100, format=fromfmt)
        converter = getattr(base, 'to' + tofmt, None)
        if converter is None:
            # Unsupported conversion; fail at timing instead of setup.
            def converter():
                raise RuntimeError()
        self.fn = converter

    def time_conversion(self, fromfmt, tofmt):
        self.fn()
class Getset(Benchmark):
    """Benchmark fancy getitem/setitem on sparse matrices.

    ``track_fancy_setitem`` reports a hand-measured minimum wall time so
    that assigning into a fresh copy ('different' pattern) can be told
    apart from repeated assignment into the same matrix ('same' pattern).
    """
    params = [
        [1, 10, 100, 1000, 10000],
        ['different', 'same'],
        ['csr', 'csc', 'lil', 'dok']
    ]
    param_names = ['N', 'sparsity pattern', 'format']
    unit = "seconds"
    def setup(self, N, sparsity_pattern, format):
        if format == 'dok' and N > 500:
            raise NotImplementedError()
        self.A = rand(1000, 1000, density=1e-5)
        A = self.A
        N = int(N)
        # indices to assign to
        i, j = [], []
        while len(i) < N:
            n = N - len(i)
            ip = numpy.random.randint(0, A.shape[0], size=n)
            jp = numpy.random.randint(0, A.shape[1], size=n)
            i = numpy.r_[i, ip]
            j = numpy.r_[j, jp]
        # NOTE(review): `v` is sized by the last chunk `n`, not `N`; since
        # `i` starts empty the loop body runs exactly once so n == N here,
        # but confirm if the loop can ever iterate more than once.
        v = numpy.random.rand(n)
        if N == 1:
            i = int(i)
            j = int(j)
            v = float(v)
        base = A.asformat(format)
        self.m = base.copy()
        self.i = i
        self.j = j
        self.v = v
    def _timeit(self, kernel, recopy):
        # Run `kernel` repeatedly for ~0.1 s of wall time and return the
        # minimum observed per-call duration; `recopy` decides whether each
        # run starts from a fresh copy of the matrix.
        min_time = 1e99
        if not recopy:
            kernel(self.m, self.i, self.j, self.v)
        number = 1
        start = time.time()
        while time.time() - start < 0.1:
            if recopy:
                m = self.m.copy()
            else:
                m = self.m
            while True:
                duration = timeit.timeit(
                    lambda: kernel(m, self.i, self.j, self.v), number=number)
                if duration > 1e-5:
                    break
                else:
                    # measurement too short to be reliable; batch more calls
                    number *= 10
            min_time = min(min_time, duration/number)
        return min_time
    def track_fancy_setitem(self, N, sparsity_pattern, format):
        def kernel(A, i, j, v):
            A[i, j] = v
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', SparseEfficiencyWarning)
            return self._timeit(kernel, sparsity_pattern == 'different')
    def time_fancy_getitem(self, N, sparsity_pattern, format):
        self.m[self.i, self.j]
class NullSlice(Benchmark):
    """Time row/column slicing of large sparse matrices.

    Matrices are generated once in ``setup_cache`` and pickled to disk so
    each parameter combination only pays the (fast) unpickling cost.
    """
    params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
    param_names = ['density', 'format']
    def _setup(self, density, format):
        n = 100000
        k = 1000
        # faster version of rand(n, k, format=format, density=density),
        # with non-exact nnz
        nz = int(n*k * density)
        row = np.random.randint(0, n, size=nz)
        col = np.random.randint(0, k, size=nz)
        data = np.ones(nz, dtype=np.float64)
        X = coo_matrix((data, (row, col)), shape=(n, k))
        X.sum_duplicates()
        X = X.asformat(format)
        with open('{}-{}.pck'.format(density, format), 'wb') as f:
            pickle.dump(X, f, protocol=pickle.HIGHEST_PROTOCOL)
    def setup_cache(self):
        # Build and pickle every (density, format) combination up front.
        for density in self.params[0]:
            for fmt in self.params[1]:
                self._setup(density, fmt)
    setup_cache.timeout = 120
    def setup(self, density, format):
        # Unpickling is faster than computing the random matrix...
        with open('{}-{}.pck'.format(density, format), 'rb') as f:
            self.X = pickle.load(f)
    def time_getrow(self, density, format):
        self.X.getrow(100)
    def time_getcol(self, density, format):
        self.X.getcol(100)
    def time_3_rows(self, density, format):
        self.X[[0, 100, 105], :]
    def time_10000_rows(self, density, format):
        self.X[np.arange(10000), :]
    def time_3_cols(self, density, format):
        self.X[:, [0, 100, 105]]
    def time_100_cols(self, density, format):
        self.X[:, np.arange(100)]
    # Retain old benchmark results (remove this if changing the benchmark)
    time_10000_rows.version = "dc19210b894d5fd41d4563f85b7459ef5836cddaf77154b539df3ea91c5d5c1c"
    time_100_cols.version = "8d43ed52084cdab150018eedb289a749a39f35d4dfa31f53280f1ef286a23046"
    time_3_cols.version = "93e5123910772d62b3f72abff56c2732f83d217221bce409b70e77b89c311d26"
    time_3_rows.version = "a9eac80863a0b2f4b510269955041930e5fdd15607238257eb78244f891ebfe6"
    time_getcol.version = "291388763b355f0f3935db9272a29965d14fa3f305d3306059381e15300e638b"
    time_getrow.version = "edb9e4291560d6ba8dd58ef371b3a343a333bc10744496adb3ff964762d33c68"
class Diagonal(Benchmark):
    """Time extracting the main diagonal for various sparse formats."""
    params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
    param_names = ['density', 'format']
    def setup(self, density, format):
        n = 1000
        # dok construction is too slow at high densities; skip it.
        if format == 'dok' and n * density >= 500:
            raise NotImplementedError()
        warnings.simplefilter('ignore', SparseEfficiencyWarning)
        self.X = sparse.rand(n, n, format=format, density=density)
    def time_diagonal(self, density, format):
        self.X.diagonal()
    # Retain old benchmark results (remove this if changing the benchmark)
    time_diagonal.version = "d84f53fdc6abc208136c8ce48ca156370f6803562f6908eb6bd1424f50310cf1"
class Sum(Benchmark):
    """Time full and per-axis summation for various sparse formats."""
    params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
    param_names = ['density', 'format']
    def setup(self, density, format):
        n = 1000
        # dok construction is too slow at high densities; skip it.
        if format == 'dok' and n * density >= 500:
            raise NotImplementedError()
        warnings.simplefilter('ignore', SparseEfficiencyWarning)
        self.X = sparse.rand(n, n, format=format, density=density)
    def time_sum(self, density, format):
        self.X.sum()
    def time_sum_axis0(self, density, format):
        self.X.sum(axis=0)
    def time_sum_axis1(self, density, format):
        self.X.sum(axis=1)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_sum.version = "05c305857e771024535e546360203b17f5aca2b39b023a49ab296bd746d6cdd3"
    time_sum_axis0.version = "8aca682fd69aa140c69c028679826bdf43c717589b1961b4702d744ed72effc6"
    time_sum_axis1.version = "1a6e05244b77f857c61f8ee09ca3abd006a10ba07eff10b1c5f9e0ac20f331b2"
class Iteration(Benchmark):
    """Time plain row iteration over a sparse matrix."""
    params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
    param_names = ['density', 'format']
    def setup(self, density, format):
        n = 500
        k = 1000
        self.X = sparse.rand(n, k, format=format, density=density)
    def time_iteration(self, density, format):
        # Only the iteration overhead is of interest; rows are discarded.
        for row in self.X:
            pass
class Densify(Benchmark):
    """Time sparse-to-dense conversion in C and Fortran memory order."""
    params = [
        ['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr'],
        ['C', 'F'],
    ]
    param_names = ['format', 'order']
    def setup(self, format, order):
        warnings.simplefilter('ignore', SparseEfficiencyWarning)
        self.X = sparse.rand(1000, 1000, format=format, density=0.01)
    def time_toarray(self, format, order):
        self.X.toarray(order=order)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_toarray.version = "2fbf492ec800b982946a62785beda803460b913cc80080043a5d407025893b2b"
class Random(Benchmark):
    """Time random sparse matrix generation across densities 0.0-1.0."""
    params = [
        np.arange(0, 1.1, 0.1).tolist()
    ]
    param_names = ['density']
    def setup(self, density):
        warnings.simplefilter('ignore', SparseEfficiencyWarning)
        self.nrows = 1000
        self.ncols = 1000
        self.format = 'csr'
    def time_rand(self, density):
        sparse.rand(self.nrows, self.ncols,
                    format=self.format, density=density)
| 14,747
| 29.471074
| 96
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_linalg_onenormest.py
|
"""Compare the speed of exact one-norm calculation vs. its estimation.
"""
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.sparse
import scipy.special # import cycle workaround for some versions
import scipy.sparse.linalg
class BenchmarkOneNormEst(Benchmark):
    """Compare exact one-norm computation against ``onenormest``."""
    params = [
        [2, 3, 5, 10, 30, 100, 300, 500, 1000, 1e4, 1e5, 1e6],
        ['exact', 'onenormest']
    ]
    param_names = ['n', 'solver']
    def setup(self, n, solver):
        rng = np.random.default_rng(1234)
        nrepeats = 100
        shape = (int(n), int(n))
        if solver == 'exact' and n >= 300:
            # skip: slow, and not useful to benchmark
            raise NotImplementedError()
        if n <= 1000:
            # Sample the matrices.
            self.matrices = []
            for i in range(nrepeats):
                M = rng.standard_normal(shape)
                self.matrices.append(M)
        else:
            # Very large n: use a single sparse matrix with bounded nnz.
            max_nnz = 100000
            nrepeats = 1
            self.matrices = []
            for i in range(nrepeats):
                M = scipy.sparse.rand(shape[0], shape[1], min(max_nnz/(shape[0]*shape[1]), 1e-5), random_state=rng)
                self.matrices.append(M)
    def time_onenormest(self, n, solver):
        if solver == 'exact':
            # Get the exact values of one-norms of squares.
            for M in self.matrices:
                M.dot(M)
                scipy.sparse.linalg._matfuncs._onenorm(M)
        elif solver == 'onenormest':
            # Get the estimates of one-norms of squares.
            for M in self.matrices:
                scipy.sparse.linalg._matfuncs._onenormest_matrix_power(M, 2)
    # Retain old benchmark results (remove this if changing the benchmark)
    time_onenormest.version = "f7b31b4bf5caa50d435465e78dab6e133f3c263a52c4523eec785446185fdb6f"
| 1,883
| 32.052632
| 115
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_linalg_solve.py
|
"""
Check the speed of the conjugate gradient solver.
"""
import numpy as np
from numpy.testing import assert_equal
from .common import Benchmark, safe_import
with safe_import():
from scipy import linalg, sparse
from scipy.sparse.linalg import cg, minres, gmres, tfqmr, spsolve
with safe_import():
from scipy.sparse.linalg import lgmres
with safe_import():
from scipy.sparse.linalg import gcrotmk
def _create_sparse_poisson1d(n):
# Make Gilbert Strang's favorite matrix
# http://www-math.mit.edu/~gs/PIX/cupcakematrix.jpg
P1d = sparse.diags([[-1]*(n-1), [2]*n, [-1]*(n-1)], [-1, 0, 1])
assert_equal(P1d.shape, (n, n))
return P1d
def _create_sparse_poisson2d(n):
    # Kronecker sum of two 1-D Poisson operators gives the 2-D operator.
    one_d = _create_sparse_poisson1d(n)
    two_d = sparse.kronsum(one_d, one_d)
    assert_equal(two_d.shape, (n*n, n*n))
    return two_d.tocsr()
class Bench(Benchmark):
    """Time dense and sparse/iterative solvers on a 2D Poisson system."""
    params = [
        [4, 6, 10, 16, 25, 40, 64, 100],
        ['dense', 'spsolve', 'cg', 'minres', 'gmres', 'lgmres', 'gcrotmk',
         'tfqmr']
    ]
    # Maps solver names to the callables imported at module scope.
    mapping = {'spsolve': spsolve, 'cg': cg, 'minres': minres, 'gmres': gmres,
               'lgmres': lgmres, 'gcrotmk': gcrotmk, 'tfqmr': tfqmr}
    param_names = ['(n,n)', 'solver']
    def setup(self, n, solver):
        # Dense solve of the (n*n, n*n) system gets too expensive; skip.
        if solver == 'dense' and n >= 25:
            raise NotImplementedError()
        self.b = np.ones(n*n)
        self.P_sparse = _create_sparse_poisson2d(n)
        if solver == 'dense':
            self.P_dense = self.P_sparse.A
    def time_solve(self, n, solver):
        if solver == 'dense':
            linalg.solve(self.P_dense, self.b)
        else:
            self.mapping[solver](self.P_sparse, self.b)
class Lgmres(Benchmark):
    """Time one LGMRES outer iteration for varying inner subspace sizes."""
    params = [
        [10, 50, 100, 1000, 10000],
        [10, 30, 60, 90, 180],
    ]
    param_names = ['n', 'm']
    def setup(self, n, m):
        rng = np.random.default_rng(1234)
        # Well-conditioned system: identity plus a sparse perturbation.
        self.A = sparse.eye(n, n) + sparse.rand(n, n, density=0.01, random_state=rng)
        self.b = np.ones(n)
    def time_inner(self, n, m):
        lgmres(self.A, self.b, inner_m=m, maxiter=1)
| 2,078
| 27.094595
| 85
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_csgraph_maxflow.py
|
import numpy as np
import scipy.sparse
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.csgraph import maximum_flow
class MaximumFlow(Benchmark):
    """Time ``maximum_flow`` on random integer-capacity graphs."""
    params = [[200, 500, 1500], [0.1, 0.3, 0.5]]
    param_names = ['n', 'density']

    def setup(self, n, density):
        # Create random matrices whose values are integers between 0 and 100.
        cap = (scipy.sparse.rand(n, n, density=density, format='lil',
                                 random_state=42)*100).astype(np.int32)
        cap.setdiag(np.zeros(n, dtype=np.int32))  # no self-loops
        self.data = scipy.sparse.csr_matrix(cap)

    def time_maximum_flow(self, n, density):
        maximum_flow(self.data, 0, n - 1)
| 714
| 30.086957
| 77
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/signal.py
|
from itertools import product
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.signal as signal
class Resample(Benchmark):
    """Time ``signal.resample`` for real and complex inputs."""
    # Some slow (prime), some fast (in radix)
    param_names = ['N', 'num']
    params = [[977, 9973, 2 ** 14, 2 ** 16]] * 2
    def setup(self, N, num):
        x = np.linspace(0, 10, N, endpoint=False)
        self.y = np.cos(-x**2/6.0)
    def time_complex(self, N, num):
        signal.resample(self.y + 0j, num)
    def time_real(self, N, num):
        signal.resample(self.y, num)
class CalculateWindowedFFT(Benchmark):
    """Time spectral estimation routines on long random signals."""
    def setup(self):
        rng = np.random.default_rng(5678)
        # Create some long arrays for computation
        x = rng.standard_normal(2**20)
        y = rng.standard_normal(2**20)
        self.x = x
        self.y = y
    def time_welch(self):
        signal.welch(self.x)
    def time_csd(self):
        signal.csd(self.x, self.y)
    def time_periodogram(self):
        signal.periodogram(self.x)
    def time_spectrogram(self):
        signal.spectrogram(self.x)
    def time_coherence(self):
        signal.coherence(self.x, self.y)
class Convolve2D(Benchmark):
param_names = ['mode', 'boundary']
params = [
['full', 'valid', 'same'],
['fill', 'wrap', 'symm']
]
def setup(self, mode, boundary):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = []
for ma, na, mb, nb in product((8, 13, 30, 36), repeat=4):
a = rng.standard_normal((ma, na))
b = rng.standard_normal((mb, nb))
pairs.append((a, b))
self.pairs = pairs
def time_convolve2d(self, mode, boundary):
for a, b in self.pairs:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.convolve2d(a, b, mode=mode, boundary=boundary)
def time_correlate2d(self, mode, boundary):
for a, b in self.pairs:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.correlate2d(a, b, mode=mode, boundary=boundary)
class FFTConvolve(Benchmark):
param_names = ['mode', 'size']
params = [
['full', 'valid', 'same'],
[(a,b) for a,b in product((1, 2, 8, 36, 60, 150, 200, 500), repeat=2)
if b <= a]
]
def setup(self, mode, size):
rng = np.random.default_rng(1234)
self.a = rng.standard_normal(size[0])
self.b = rng.standard_normal(size[1])
def time_convolve2d(self, mode, size):
signal.fftconvolve(self.a, self.b, mode=mode)
class OAConvolve(Benchmark):
param_names = ['mode', 'size']
params = [
['full', 'valid', 'same'],
[(a, b) for a, b in product((40, 200, 3000), repeat=2)
if b < a]
]
def setup(self, mode, size):
rng = np.random.default_rng(1234)
self.a = rng.standard_normal(size[0])
self.b = rng.standard_normal(size[1])
def time_convolve2d(self, mode, size):
signal.oaconvolve(self.a, self.b, mode=mode)
class Convolve(Benchmark):
param_names = ['mode']
params = [
['full', 'valid', 'same']
]
def setup(self, mode):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = {'1d': [], '2d': []}
for ma, nb in product((1, 2, 8, 13, 30, 36, 50, 75), repeat=2):
a = rng.standard_normal(ma)
b = rng.standard_normal(nb)
pairs['1d'].append((a, b))
for n_image in [256, 512, 1024]:
for n_kernel in [3, 5, 7]:
x = rng.standard_normal((n_image, n_image))
h = rng.standard_normal((n_kernel, n_kernel))
pairs['2d'].append((x, h))
self.pairs = pairs
def time_convolve(self, mode):
for a, b in self.pairs['1d']:
if b.shape[0] > a.shape[0]:
continue
signal.convolve(a, b, mode=mode)
def time_convolve2d(self, mode):
for a, b in self.pairs['2d']:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.convolve(a, b, mode=mode)
def time_correlate(self, mode):
for a, b in self.pairs['1d']:
if b.shape[0] > a.shape[0]:
continue
signal.correlate(a, b, mode=mode)
def time_correlate2d(self, mode):
for a, b in self.pairs['2d']:
if mode == 'valid':
if b.shape[0] > a.shape[0] or b.shape[1] > a.shape[1]:
continue
signal.correlate(a, b, mode=mode)
class LTI(Benchmark):
def setup(self):
self.system = signal.lti(1.0, [1, 0, 1])
self.t = np.arange(0, 100, 0.5)
self.u = np.sin(2 * self.t)
def time_lsim(self):
signal.lsim(self.system, self.u, self.t)
def time_lsim2(self):
signal.lsim2(self.system, self.u, self.t)
def time_step(self):
signal.step(self.system, T=self.t)
def time_impulse(self):
signal.impulse(self.system, T=self.t)
def time_bode(self):
signal.bode(self.system)
class Upfirdn1D(Benchmark):
param_names = ['up', 'down']
params = [
[1, 4],
[1, 4]
]
def setup(self, up, down):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = []
for nfilt in [8, ]:
for n in [32, 128, 512, 2048]:
h = rng.standard_normal(nfilt)
x = rng.standard_normal(n)
pairs.append((h, x))
self.pairs = pairs
def time_upfirdn1d(self, up, down):
for h, x in self.pairs:
signal.upfirdn(h, x, up=up, down=down)
class Upfirdn2D(Benchmark):
param_names = ['up', 'down', 'axis']
params = [
[1, 4],
[1, 4],
[0, -1],
]
def setup(self, up, down, axis):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = []
for nfilt in [8, ]:
for n in [32, 128, 512]:
h = rng.standard_normal(nfilt)
x = rng.standard_normal((n, n))
pairs.append((h, x))
self.pairs = pairs
def time_upfirdn2d(self, up, down, axis):
for h, x in self.pairs:
signal.upfirdn(h, x, up=up, down=down, axis=axis)
class FIRLS(Benchmark):
param_names = ['n', 'edges']
params = [
[21, 101, 1001, 2001],
[(0.1, 0.9), (0.01, 0.99)],
]
def time_firls(self, n, edges):
signal.firls(n, (0,) + edges + (1,), [1, 1, 0, 0])
| 6,839
| 26.46988
| 77
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/cluster_hierarchy_disjoint_set.py
|
import numpy as np
try:
from scipy.cluster.hierarchy import DisjointSet
except ImportError:
pass
from .common import Benchmark
class Bench(Benchmark):
params = [[100, 1000, 10000]]
param_names = ['n']
def setup(self, n):
# Create random edges
rng = np.random.RandomState(seed=0)
self.edges = rng.randint(0, 10 * n, (n, 2))
self.nodes = np.unique(self.edges)
self.disjoint_set = DisjointSet(self.nodes)
self.pre_merged = DisjointSet(self.nodes)
for a, b in self.edges:
self.pre_merged.merge(a, b)
self.pre_merged_found = DisjointSet(self.nodes)
for a, b in self.edges:
self.pre_merged_found.merge(a, b)
for x in self.nodes:
self.pre_merged_found[x]
def time_merge(self, n):
dis = self.disjoint_set
for a, b in self.edges:
dis.merge(a, b)
def time_merge_already_merged(self, n):
dis = self.pre_merged
for a, b in self.edges:
dis.merge(a, b)
def time_find(self, n):
dis = self.pre_merged
return [dis[i] for i in self.nodes]
def time_find_already_found(self, n):
dis = self.pre_merged_found
return [dis[i] for i in self.nodes]
def time_contains(self, n):
assert self.nodes[0] in self.pre_merged
assert self.nodes[n // 2] in self.pre_merged
assert self.nodes[-1] in self.pre_merged
def time_absence(self, n):
# Test for absence
assert None not in self.pre_merged
assert "dummy" not in self.pre_merged
assert (1, 2, 3) not in self.pre_merged
| 1,653
| 26.566667
| 55
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/signal_filtering.py
|
import numpy as np
import timeit
from concurrent.futures import ThreadPoolExecutor, wait
from .common import Benchmark, safe_import
with safe_import():
from scipy.signal import (lfilter, firwin, decimate, butter, sosfilt,
medfilt2d)
class Decimate(Benchmark):
param_names = ['q', 'ftype', 'zero_phase']
params = [
[2, 10, 30],
['iir', 'fir'],
[True, False]
]
def setup(self, q, ftype, zero_phase):
np.random.seed(123456)
sample_rate = 10000.
t = np.arange(int(1e6), dtype=np.float64) / sample_rate
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*4e3*t)
def time_decimate(self, q, ftype, zero_phase):
decimate(self.sig, q, ftype=ftype, zero_phase=zero_phase)
class Lfilter(Benchmark):
param_names = ['n_samples', 'numtaps']
params = [
[1e3, 50e3, 1e6],
[9, 23, 51]
]
def setup(self, n_samples, numtaps):
np.random.seed(125678)
sample_rate = 25000.
t = np.arange(n_samples, dtype=np.float64) / sample_rate
nyq_rate = sample_rate / 2.
cutoff_hz = 3000.0
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
self.coeff = firwin(numtaps, cutoff_hz/nyq_rate)
def time_lfilter(self, n_samples, numtaps):
lfilter(self.coeff, 1.0, self.sig)
class ParallelSosfilt(Benchmark):
timeout = 100
timer = timeit.default_timer
param_names = ['n_samples', 'threads']
params = [
[1e3, 10e3],
[1, 2, 4]
]
def setup(self, n_samples, threads):
self.filt = butter(8, 8e-6, "lowpass", output="sos")
self.data = np.arange(int(n_samples) * 3000).reshape(int(n_samples), 3000)
self.chunks = np.array_split(self.data, threads)
def time_sosfilt(self, n_samples, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
futures = []
for i in range(threads):
futures.append(pool.submit(sosfilt, self.filt, self.chunks[i]))
wait(futures)
class Sosfilt(Benchmark):
param_names = ['n_samples', 'order']
params = [
[1000, 1000000],
[6, 20]
]
def setup(self, n_samples, order):
self.sos = butter(order, [0.1575, 0.1625], 'band', output='sos')
self.y = np.random.RandomState(0).randn(n_samples)
def time_sosfilt_basic(self, n_samples, order):
sosfilt(self.sos, self.y)
class MedFilt2D(Benchmark):
param_names = ['threads']
params = [[1, 2, 4]]
def setup(self, threads):
rng = np.random.default_rng(8176)
self.chunks = np.array_split(rng.standard_normal((250, 349)), threads)
def _medfilt2d(self, threads):
with ThreadPoolExecutor(max_workers=threads) as pool:
wait({pool.submit(medfilt2d, chunk, 5) for chunk in self.chunks})
def time_medfilt2d(self, threads):
self._medfilt2d(threads)
def peakmem_medfilt2d(self, threads):
self._medfilt2d(threads)
| 3,044
| 28
| 82
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_linalg_lobpcg.py
|
from functools import partial
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
from scipy import array, r_, ones, arange, sort, diag, cos, rand, pi
from scipy.linalg import eigh, orth, cho_factor, cho_solve
import scipy.sparse
from scipy.sparse.linalg import lobpcg
from scipy.sparse.linalg._interface import LinearOperator
def _sakurai(n):
""" Example taken from
T. Sakurai, H. Tadano, Y. Inadomi and U. Nagashima
A moment-based method for large-scale generalized eigenvalue problems
Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004) """
A = scipy.sparse.eye(n, n)
d0 = array(r_[5, 6*ones(n-2), 5])
d1 = -4*ones(n)
d2 = ones(n)
B = scipy.sparse.spdiags([d2, d1, d0, d1, d2], [-2, -1, 0, 1, 2], n, n)
k = arange(1, n+1)
w_ex = sort(1. / (16.*pow(cos(0.5*k*pi/(n+1)), 4))) # exact eigenvalues
return A, B, w_ex
def _mikota_pair(n):
# Mikota pair acts as a nice test since the eigenvalues
# are the squares of the integers n, n=1,2,...
x = arange(1, n + 1)
B = diag(1. / x)
y = arange(n - 1, 0, -1)
z = arange(2 * n - 1, 0, -2)
A = diag(z) - diag(y, -1) - diag(y, 1)
return A.astype(float), B.astype(float)
def _as2d(ar):
if ar.ndim == 2:
return ar
else: # Assume 1!
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _precond(LorU, lower, x):
y = cho_solve((LorU, lower), x)
return _as2d(y)
class Bench(Benchmark):
params = [
[],
['lobpcg', 'eigh']
]
param_names = ['n', 'solver']
def __init__(self):
self.time_mikota.__func__.params = list(self.params)
self.time_mikota.__func__.params[0] = [128, 256, 512, 1024, 2048]
self.time_mikota.__func__.setup = self.setup_mikota
self.time_sakurai.__func__.params = list(self.params)
self.time_sakurai.__func__.params[0] = [50, 400]
self.time_sakurai.__func__.setup = self.setup_sakurai
def setup_mikota(self, n, solver):
self.shape = (n, n)
self.A, self.B = _mikota_pair(n)
if solver == 'eigh' and n >= 512:
# skip: slow, and not useful to benchmark
raise NotImplementedError()
def setup_sakurai(self, n, solver):
self.shape = (n, n)
self.A, self.B, all_eigenvalues = _sakurai(n)
self.A_dense = self.A.A
self.B_dense = self.B.A
def time_mikota(self, n, solver):
m = 10
if solver == 'lobpcg':
X = rand(n, m)
X = orth(X)
LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
M = LinearOperator(self.shape,
matvec=partial(_precond, LorU, lower),
matmat=partial(_precond, LorU, lower))
eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
else:
eigh(self.A, self.B, eigvals_only=True, eigvals=(0, m - 1))
def time_sakurai(self, n, solver):
m = 3
if solver == 'lobpcg':
X = rand(n, m)
eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
retResidualNormsHistory=1)
else:
eigh(self.A_dense, self.B_dense, eigvals_only=True, eigvals=(0, m - 1))
# Retain old benchmark results (remove this if changing the benchmark)
time_mikota.version = "a1fb679758f7e5cf79d18cc4930afdff999fccc142fe7a4f63e73b39ab1f58bb"
time_sakurai.version = "7c38d449924fb71f777bd408072ecc883b8b05e53a6544e97da3887fbc10b235"
| 3,646
| 31.5625
| 93
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/integrate.py
|
import numpy as np
from .common import Benchmark, safe_import
from scipy.integrate import quad
with safe_import():
import ctypes
import scipy.integrate._test_multivariate as clib_test
from scipy._lib import _ccallback_c
with safe_import() as exc:
from scipy import LowLevelCallable
from_cython = LowLevelCallable.from_cython
if exc.error:
LowLevelCallable = lambda func, data: (func, data)
from_cython = lambda *a: a
with safe_import() as exc:
import cffi
if exc.error:
cffi = None # noqa: F811
with safe_import():
from scipy.integrate import solve_bvp
class SolveBVP(Benchmark):
TOL = 1e-5
def fun_flow(self, x, y, p):
A = p[0]
return np.vstack((
y[1], y[2], 100 * (y[1] ** 2 - y[0] * y[2] - A),
y[4], -100 * y[0] * y[4] - 1, y[6], -70 * y[0] * y[6]
))
def bc_flow(self, ya, yb, p):
return np.array([
ya[0], ya[1], yb[0] - 1, yb[1], ya[3], yb[3], ya[5], yb[5] - 1])
def time_flow(self):
x = np.linspace(0, 1, 10)
y = np.ones((7, x.size))
solve_bvp(self.fun_flow, self.bc_flow, x, y, p=[1], tol=self.TOL)
def fun_peak(self, x, y):
eps = 1e-3
return np.vstack((
y[1],
-(4 * x * y[1] + 2 * y[0]) / (eps + x**2)
))
def bc_peak(self, ya, yb):
eps = 1e-3
v = (1 + eps) ** -1
return np.array([ya[0] - v, yb[0] - v])
def time_peak(self):
x = np.linspace(-1, 1, 5)
y = np.zeros((2, x.size))
solve_bvp(self.fun_peak, self.bc_peak, x, y, tol=self.TOL)
def fun_gas(self, x, y):
alpha = 0.8
return np.vstack((
y[1],
-2 * x * y[1] * (1 - alpha * y[0]) ** -0.5
))
def bc_gas(self, ya, yb):
return np.array([ya[0] - 1, yb[0]])
def time_gas(self):
x = np.linspace(0, 3, 5)
y = np.empty((2, x.size))
y[0] = 0.5
y[1] = -0.5
solve_bvp(self.fun_gas, self.bc_gas, x, y, tol=self.TOL)
class Quad(Benchmark):
def setup(self):
from math import sin
self.f_python = lambda x: sin(x)
self.f_cython = from_cython(_ccallback_c, "sine")
try:
from scipy.integrate.tests.test_quadpack import get_clib_test_routine
self.f_ctypes = get_clib_test_routine('_multivariate_sin', ctypes.c_double,
ctypes.c_int, ctypes.c_double)
except ImportError:
lib = ctypes.CDLL(clib_test.__file__)
self.f_ctypes = lib._multivariate_sin
self.f_ctypes.restype = ctypes.c_double
self.f_ctypes.argtypes = (ctypes.c_int, ctypes.c_double)
if cffi is not None:
voidp = ctypes.cast(self.f_ctypes, ctypes.c_void_p)
address = voidp.value
ffi = cffi.FFI()
self.f_cffi = LowLevelCallable(ffi.cast("double (*)(int, double *)", address))
def time_quad_python(self):
quad(self.f_python, 0, np.pi)
def time_quad_cython(self):
quad(self.f_cython, 0, np.pi)
def time_quad_ctypes(self):
quad(self.f_ctypes, 0, np.pi)
def time_quad_cffi(self):
quad(self.f_cffi, 0, np.pi)
| 3,261
| 27.365217
| 90
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/linalg_sqrtm.py
|
""" Benchmark linalg.sqrtm for various blocksizes.
"""
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg
class Sqrtm(Benchmark):
params = [
['float64', 'complex128'],
[64, 256],
[32, 64, 256]
]
param_names = ['dtype', 'n', 'blocksize']
def setup(self, dtype, n, blocksize):
n = int(n)
dtype = np.dtype(dtype)
blocksize = int(blocksize)
A = np.random.rand(n, n)
if dtype == np.complex128:
A = A + 1j*np.random.rand(n, n)
self.A = A
if blocksize > n:
raise NotImplementedError()
def time_sqrtm(self, dtype, n, blocksize):
scipy.linalg.sqrtm(self.A, disp=False, blocksize=blocksize)
| 776
| 21.852941
| 67
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/optimize.py
|
import os
import time
import inspect
import json
import traceback
from collections import defaultdict
import numpy as np
from . import test_functions as funcs
from . import go_benchmark_functions as gbf
from .common import Benchmark, is_xslow, safe_import
from .lsq_problems import extract_lsq_problems
with safe_import():
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import (leastsq, basinhopping, differential_evolution,
dual_annealing, shgo, direct)
from scipy.optimize._minimize import MINIMIZE_METHODS
from .cutest.calfun import calfun
from .cutest.dfoxs import dfoxs
class _BenchOptimizers(Benchmark):
"""a framework for benchmarking the optimizer
Parameters
----------
function_name : string
fun : callable
der : callable
function that returns the derivative (jacobian, gradient) of fun
hess : callable
function that returns the hessian of fun
minimizer_kwargs : kwargs
additional keywords passed to the minimizer. e.g. tol, maxiter
"""
def __init__(self, function_name, fun, der=None, hess=None,
**minimizer_kwargs):
self.function_name = function_name
self.fun = fun
self.der = der
self.hess = hess
self.minimizer_kwargs = minimizer_kwargs
if "tol" not in minimizer_kwargs:
minimizer_kwargs["tol"] = 1e-4
self.results = []
@classmethod
def from_funcobj(cls, function_name, function, **minimizer_kwargs):
self = cls.__new__(cls)
self.function_name = function_name
self.function = function
self.fun = function.fun
if hasattr(function, 'der'):
self.der = function.der
self.bounds = function.bounds
self.minimizer_kwargs = minimizer_kwargs
self.results = []
return self
def reset(self):
self.results = []
def energy_gradient(self, x):
return self.fun(x), self.function.der(x)
def add_result(self, result, t, name):
"""add a result to the list"""
result.time = t
result.name = name
if not hasattr(result, "njev"):
result.njev = 0
if not hasattr(result, "nhev"):
result.nhev = 0
self.results.append(result)
def print_results(self):
"""print the current list of results"""
results = self.average_results()
results = sorted(results, key=lambda x: (x.nfail, x.mean_time))
if not results:
return
print("")
print("=========================================================")
print("Optimizer benchmark: %s" % (self.function_name))
print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
print("averaged over %d starting configurations" % (results[0].ntrials))
print(" Optimizer nfail nfev njev nhev time")
print("---------------------------------------------------------")
for res in results:
print("%11s | %4d | %4d | %4d | %4d | %.6g" %
(res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))
def average_results(self):
"""group the results by minimizer and average over the runs"""
grouped_results = defaultdict(list)
for res in self.results:
grouped_results[res.name].append(res)
averaged_results = dict()
for name, result_list in grouped_results.items():
newres = scipy.optimize.OptimizeResult()
newres.name = name
newres.mean_nfev = np.mean([r.nfev for r in result_list])
newres.mean_njev = np.mean([r.njev for r in result_list])
newres.mean_nhev = np.mean([r.nhev for r in result_list])
newres.mean_time = np.mean([r.time for r in result_list])
funs = [r.fun for r in result_list]
newres.max_obj = np.max(funs)
newres.min_obj = np.min(funs)
newres.mean_obj = np.mean(funs)
newres.ntrials = len(result_list)
newres.nfail = len([r for r in result_list if not r.success])
newres.nsuccess = len([r for r in result_list if r.success])
try:
newres.ndim = len(result_list[0].x)
except TypeError:
newres.ndim = 1
averaged_results[name] = newres
return averaged_results
# for basinhopping
def accept_test(self, x_new=None, *args, **kwargs):
"""
Does the new candidate vector lie in between the bounds?
Returns
-------
accept_test : bool
The candidate vector lies in between the bounds
"""
if not hasattr(self.function, "xmin"):
return True
if np.any(x_new < self.function.xmin):
return False
if np.any(x_new > self.function.xmax):
return False
return True
def run_basinhopping(self):
"""
Do an optimization run for basinhopping
"""
kwargs = self.minimizer_kwargs
if hasattr(self.fun, "temperature"):
kwargs["T"] = self.function.temperature
if hasattr(self.fun, "stepsize"):
kwargs["stepsize"] = self.function.stepsize
minimizer_kwargs = {"method": "L-BFGS-B"}
x0 = self.function.initial_vector()
# basinhopping - no gradient
minimizer_kwargs['jac'] = False
self.function.nfev = 0
t0 = time.time()
res = basinhopping(
self.fun, x0, accept_test=self.accept_test,
minimizer_kwargs=minimizer_kwargs,
**kwargs)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'basinh.')
def run_direct(self):
"""
Do an optimization run for direct
"""
self.function.nfev = 0
t0 = time.time()
res = direct(self.fun,
self.bounds)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DIRECT')
def run_shgo(self):
"""
Do an optimization run for shgo
"""
self.function.nfev = 0
t0 = time.time()
res = shgo(self.fun,
self.bounds)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'SHGO')
def run_differentialevolution(self):
"""
Do an optimization run for differential_evolution
"""
self.function.nfev = 0
t0 = time.time()
res = differential_evolution(self.fun,
self.bounds,
popsize=20)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DE')
def run_dualannealing(self):
"""
Do an optimization run for dual_annealing
"""
self.function.nfev = 0
t0 = time.time()
res = dual_annealing(self.fun,
self.bounds)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DA')
def bench_run_global(self, numtrials=50, methods=None):
"""
Run the optimization tests for the required minimizers.
"""
if methods is None:
methods = ['DE', 'basinh.', 'DA', 'DIRECT', 'SHGO']
stochastic_methods = ['DE', 'basinh.', 'DA']
method_fun = {'DE': self.run_differentialevolution,
'basinh.': self.run_basinhopping,
'DA': self.run_dualannealing,
'DIRECT': self.run_direct,
'SHGO': self.run_shgo, }
for m in methods:
if m in stochastic_methods:
for i in range(numtrials):
method_fun[m]()
else:
method_fun[m]()
def bench_run(self, x0, methods=None, **minimizer_kwargs):
"""do an optimization test starting at x0 for all the optimizers"""
kwargs = self.minimizer_kwargs
if methods is None:
methods = MINIMIZE_METHODS
# L-BFGS-B, BFGS, trust-constr, SLSQP can use gradients, but examine
# performance when numerical differentiation is used.
fonly_methods = ["COBYLA", 'Powell', 'nelder-mead', 'L-BFGS-B', 'BFGS',
'trust-constr', 'SLSQP']
for method in fonly_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
'trust-constr']
if self.der is not None:
for method in gradient_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, **kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov', 'trust-constr']
if self.hess is not None:
for method in hessian_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, hess=self.hess,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
class BenchSmoothUnbounded(Benchmark):
"""Benchmark the optimizers with smooth, unbounded, functions"""
params = [
['rosenbrock_slow', 'rosenbrock_nograd', 'rosenbrock', 'rosenbrock_tight',
'simple_quadratic', 'asymmetric_quadratic',
'sin_1d', 'booth', 'beale', 'LJ'],
["COBYLA", 'Powell', 'nelder-mead',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov', 'trust-constr'],
["mean_nfev", "mean_time"]
]
param_names = ["test function", "solver", "result type"]
def setup(self, func_name, method_name, ret_val):
b = getattr(self, 'run_' + func_name)(methods=[method_name])
r = b.average_results().get(method_name)
if r is None:
raise NotImplementedError()
self.result = getattr(r, ret_val)
def track_all(self, func_name, method_name, ret_val):
return self.result
# SlowRosen has a 50us delay on each function evaluation. By comparing to
# rosenbrock_nograd it should be possible to figure out how much time a
# minimizer uses internally, compared to the time required for function
# evaluation.
def run_rosenbrock_slow(self, methods=None):
s = funcs.SlowRosen()
b = _BenchOptimizers("Rosenbrock function",
fun=s.fun)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
# see what the performance of the solvers are if numerical differentiation
# has to be used.
def run_rosenbrock_nograd(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock_tight(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess,
tol=1e-8)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_simple_quadratic(self, methods=None):
s = funcs.SimpleQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("simple quadratic function",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_asymmetric_quadratic(self, methods=None):
s = funcs.AsymmetricQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("function sum(x**2) + x[0]",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_sin_1d(self, methods=None):
fun = lambda x: np.sin(x[0])
der = lambda x: np.array([np.cos(x[0])])
b = _BenchOptimizers("1d sin function",
fun=fun, der=der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 1), methods=methods)
return b
def run_booth(self, methods=None):
s = funcs.Booth()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Booth's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_beale(self, methods=None):
s = funcs.Beale()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Beale's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_LJ(self, methods=None):
s = funcs.LJ()
# print "checking gradient", scipy.optimize.check_grad(s.get_energy, s.get_gradient,
# np.random.uniform(-2,2,3*4))
natoms = 4
b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, natoms*3), methods=methods)
return b
class BenchLeastSquares(Benchmark):
"""Class for benchmarking nonlinear least squares solvers."""
problems = extract_lsq_problems()
params = [
list(problems.keys()),
["average time", "nfev", "success"]
]
param_names = [
"problem", "result type"
]
def track_all(self, problem_name, result_type):
problem = self.problems[problem_name]
if problem.lb is not None or problem.ub is not None:
raise NotImplementedError
ftol = 1e-5
if result_type == 'average time':
n_runs = 10
t0 = time.time()
for _ in range(n_runs):
leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol,
full_output=True)
return (time.time() - t0) / n_runs
x, cov_x, info, message, ier = leastsq(
problem.fun, problem.x0, Dfun=problem.jac,
ftol=ftol, full_output=True
)
if result_type == 'nfev':
return info['nfev']
elif result_type == 'success':
return int(problem.check_answer(x, ftol))
else:
raise NotImplementedError
# `export SCIPY_XSLOW=1` to enable BenchGlobal.track_all
# `export SCIPY_GLOBAL_BENCH=AMGM,Adjiman,...` to run specific tests
# `export SCIPY_GLOBAL_BENCH_NUMTRIALS=10` to specify n_iterations, default 100
#
# Note that it can take several hours to run; intermediate output
# can be found under benchmarks/global-bench-results.json
class BenchGlobal(Benchmark):
"""
Benchmark the global optimizers using the go_benchmark_functions
suite
"""
timeout = 300
_functions = dict([
item for item in inspect.getmembers(gbf, inspect.isclass)
if (issubclass(item[1], gbf.Benchmark) and
item[0] not in ('Benchmark') and
not item[0].startswith('Problem'))
])
if not is_xslow():
_enabled_functions = []
elif 'SCIPY_GLOBAL_BENCH' in os.environ:
_enabled_functions = [x.strip() for x in
os.environ['SCIPY_GLOBAL_BENCH'].split(',')]
else:
_enabled_functions = list(_functions.keys())
params = [
list(_functions.keys()),
["success%", "<nfev>"],
['DE', 'basinh.', 'DA', 'DIRECT', 'SHGO'],
]
param_names = ["test function", "result type", "solver"]
def __init__(self):
self.enabled = is_xslow()
try:
self.numtrials = int(os.environ['SCIPY_GLOBAL_BENCH_NUMTRIALS'])
except (KeyError, ValueError):
self.numtrials = 100
self.dump_fn = os.path.join(os.path.dirname(__file__), '..', 'global-bench-results.json')
self.results = {}
def setup(self, name, ret_value, solver):
if name not in self._enabled_functions:
raise NotImplementedError("skipped")
# load json backing file
with open(self.dump_fn, 'r') as f:
self.results = json.load(f)
def teardown(self, name, ret_value, solver):
if not self.enabled:
return
with open(self.dump_fn, 'w') as f:
json.dump(self.results, f, indent=2, sort_keys=True)
def track_all(self, name, ret_value, solver):
if name in self.results and solver in self.results[name]:
# have we done the function, and done the solver?
# if so, then just return the ret_value
av_results = self.results[name]
if ret_value == 'success%':
return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
elif ret_value == '<nfev>':
return av_results[solver]['mean_nfev']
else:
raise ValueError()
klass = self._functions[name]
f = klass()
try:
b = _BenchOptimizers.from_funcobj(name, f)
with np.errstate(all='ignore'):
b.bench_run_global(methods=[solver],
numtrials=self.numtrials)
av_results = b.average_results()
if name not in self.results:
self.results[name] = {}
self.results[name][solver] = av_results[solver]
if ret_value == 'success%':
return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
elif ret_value == '<nfev>':
return av_results[solver]['mean_nfev']
else:
raise ValueError()
except Exception:
print("".join(traceback.format_exc()))
self.results[name] = "".join(traceback.format_exc())
def setup_cache(self):
if not self.enabled:
return
# create the logfile to start with
with open(self.dump_fn, 'w') as f:
json.dump({}, f, indent=2)
class BenchDFO(Benchmark):
"""
Benchmark the optimizers with the CUTEST DFO benchmark of Moré and Wild.
The original benchmark suite is available at
https://github.com/POptUS/BenDFO
"""
params = [
list(range(53)), # adjust which problems to solve
["COBYLA", "SLSQP", "Powell", "nelder-mead", "L-BFGS-B", "BFGS",
"trust-constr"], # note: methods must also be listed in bench_run
["mean_nfev", "min_obj"], # defined in average_results
]
param_names = ["DFO benchmark problem number", "solver", "result type"]
def setup(self, prob_number, method_name, ret_val):
probs = np.loadtxt(os.path.join(os.path.dirname(__file__),
"cutest", "dfo.txt"))
params = probs[prob_number]
nprob = int(params[0])
n = int(params[1])
m = int(params[2])
s = params[3]
factor = 10 ** s
def func(x):
return calfun(x, m, nprob)
x0 = dfoxs(n, nprob, factor)
b = getattr(self, "run_cutest")(
func, x0, prob_number=prob_number, methods=[method_name]
)
r = b.average_results().get(method_name)
if r is None:
raise NotImplementedError()
self.result = getattr(r, ret_val)
def track_all(self, prob_number, method_name, ret_val):
return self.result
def run_cutest(self, func, x0, prob_number, methods=None):
if methods is None:
methods = MINIMIZE_METHODS
b = _BenchOptimizers(f"DFO benchmark problem {prob_number}", fun=func)
b.bench_run(x0, methods=methods)
return b
| 21,806
| 34.172581
| 102
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/io_matlab.py
|
from .common import set_mem_rlimit, run_monitored, get_mem_info
import os
import tempfile
from io import BytesIO
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
from scipy.io import savemat, loadmat
class MemUsage(Benchmark):
    """Track actual/optimal peak memory ratio for MATLAB-file save/load."""
    param_names = ['size', 'compressed']
    timeout = 4*60
    unit = "actual/optimal memory usage ratio"

    @property
    def params(self):
        return [list(self._get_sizes().keys()), [True, False]]

    def _get_sizes(self):
        # Nominal payload sizes in bytes, keyed by display label.
        sizes = {
            '1M': 1e6,
            '10M': 10e6,
            '100M': 100e6,
            '300M': 300e6,
            # '500M': 500e6,
            # '1000M': 1000e6,
        }
        return sizes

    def setup(self, size, compressed):
        set_mem_rlimit()
        self.sizes = self._get_sizes()
        size = int(self.sizes[size])

        mem_info = get_mem_info()
        try:
            mem_available = mem_info['memavailable']
        except KeyError:
            mem_available = mem_info['memtotal']

        # Skip sizes that would not comfortably fit in available memory.
        max_size = int(mem_available * 0.7)//4

        if size > max_size:
            raise NotImplementedError()

        # Setup temp file
        f = tempfile.NamedTemporaryFile(delete=False, suffix='.mat')
        f.close()
        self.filename = f.name

    def teardown(self, size, compressed):
        os.unlink(self.filename)

    def track_loadmat(self, size, compressed):
        size = int(self.sizes[size])

        x = np.random.rand(size//8).view(dtype=np.uint8)
        savemat(self.filename, dict(x=x), do_compression=compressed, oned_as='row')
        del x

        # Run the load in a fresh process so only loadmat's memory counts.
        code = """
        from scipy.io import loadmat
        loadmat('%s')
        """ % (self.filename,)
        time, peak_mem = run_monitored(code)

        return peak_mem / size

    def track_savemat(self, size, compressed):
        size = int(self.sizes[size])
        # Run the save in a fresh process for the same reason as above.
        code = """
        import numpy as np
        from scipy.io import savemat
        x = np.random.rand(%d//8).view(dtype=np.uint8)
        savemat('%s', dict(x=x), do_compression=%r, oned_as='row')
        """ % (size, self.filename, compressed)
        time, peak_mem = run_monitored(code)
        return peak_mem / size
class StructArr(Benchmark):
    """Time MATLAB-file round-trips of struct arrays of varying size."""
    params = [
        [(10, 10, 20), (20, 20, 40), (30, 30, 50)],
        [False, True]
    ]
    param_names = ['(vars, fields, structs)', 'compression']

    @staticmethod
    def make_structarr(n_vars, n_fields, n_structs):
        """Build ``n_vars`` struct arrays, each with ``n_structs`` records
        of ``n_fields`` int32 fields of length 10."""
        var_dict = {}
        for vno in range(n_vars):
            # NOTE(review): '%00d' pads to width zero, so names come out as
            # 'var0', 'var1', ... — presumably '%03d' was intended; confirm.
            vname = 'var%00d' % vno
            end_dtype = [('f%d' % d, 'i4', 10) for d in range(n_fields)]
            s_arrs = np.zeros((n_structs,), dtype=end_dtype)
            var_dict[vname] = s_arrs
        return var_dict

    def setup(self, nvfs, compression):
        n_vars, n_fields, n_structs = nvfs
        self.var_dict = StructArr.make_structarr(n_vars, n_fields, n_structs)
        self.str_io = BytesIO()
        savemat(self.str_io, self.var_dict, do_compression=compression)

    def time_savemat(self, nvfs, compression):
        savemat(self.str_io, self.var_dict, do_compression=compression)

    def time_loadmat(self, nvfs, compression):
        loadmat(self.str_io)
| 3,221
| 26.775862
| 83
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/common.py
|
"""
Airspeed Velocity benchmark utilities
"""
import sys
import os
import re
import time
import textwrap
import subprocess
import itertools
import random
class Benchmark:
    """Shared base class for all asv benchmark suites.

    Exists as a single place to hang common configuration options.
    """
def is_xslow():
    """Return True when the extra-slow benchmarks are enabled.

    The SCIPY_XSLOW environment variable enables them when set to any
    non-zero integer.  Returns False when the variable is unset, zero,
    or not parseable as an integer.

    Fix: the original returned a raw ``int`` on success but ``False`` on
    parse failure; now the return type is consistently ``bool``
    (truthiness, and hence all callers, are unaffected).
    """
    try:
        return bool(int(os.environ.get('SCIPY_XSLOW', '0')))
    except ValueError:
        return False
class LimitedParamBenchmark(Benchmark):
    """
    Limits parameter combinations to `max_number` choices, chosen
    pseudo-randomly with fixed seed.
    Raises NotImplementedError (skip) if not in active set.
    """
    num_param_combinations = 0

    def setup(self, *args, **kwargs):
        # With SCIPY_XSLOW every combination runs, so nothing is skipped.
        if is_xslow():
            return

        seed = kwargs.pop('param_seed', None)
        if seed is None:
            seed = 1
        param_lists = kwargs.pop('params', None)
        if param_lists is None:
            param_lists = self.params
        limit = kwargs.pop('num_param_combinations', None)
        if limit is None:
            limit = self.num_param_combinations

        # Deterministic pseudo-random subset of the full cartesian product.
        combos = list(itertools.product(*param_lists))
        random.Random(seed).shuffle(combos)
        if args not in combos[:limit]:
            raise NotImplementedError("skipped")
def get_max_rss_bytes(rusage):
    """
    Extract the max RSS value in bytes.
    """
    if not rusage:
        return None
    peak = rusage.ru_maxrss
    if sys.platform.startswith('linux'):
        # On Linux getrusage() reports ru_maxrss in kilobytes
        # https://man7.org/linux/man-pages/man2/getrusage.2.html
        return peak * 1024
    if sys.platform == "darwin":
        # on macOS ru_maxrss is already in bytes
        return peak
    # Unknown platform: pass the raw value through unchanged.
    return peak
def run_monitored_wait4(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : int
        Peak memory usage in bytes of the child Python process

    Notes
    -----
    Works on Unix platforms (Linux, macOS) that have `os.wait4()`.
    """
    code = textwrap.dedent(code)

    t0 = time.time()
    child = subprocess.Popen([sys.executable, '-c', code])
    # os.wait4 reaps the child and returns its resource usage in one call.
    _pid, status, rusage = os.wait4(child.pid, 0)
    elapsed = time.time() - t0
    peak = get_max_rss_bytes(rusage)

    if status != 0:
        raise AssertionError("Running failed:\n%s" % code)
    return elapsed, peak
def run_monitored_proc(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : float
        Peak memory usage (rough estimate only) in bytes

    Notes
    -----
    Polls ``/proc/<pid>/status`` (Linux only), so the reported peak is a
    sampled lower bound (-1 when no sample was taken), not an exact
    high-water mark.

    Fix: the child can exit between ``poll()`` and opening the /proc
    entry, which previously raised an unhandled ``FileNotFoundError``;
    that race is now tolerated.
    """
    if not sys.platform.startswith('linux'):
        raise RuntimeError("Peak memory monitoring only works on Linux")

    code = textwrap.dedent(code)
    process = subprocess.Popen([sys.executable, '-c', code])

    peak_memusage = -1

    start = time.time()
    while True:
        ret = process.poll()
        if ret is not None:
            break

        try:
            with open('/proc/%d/status' % process.pid, 'r') as f:
                procdata = f.read()
        except OSError:
            # Child exited between poll() and open(); its /proc entry is
            # gone.  Loop around so poll() observes the exit.
            continue

        m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
        if m is not None:
            memusage = float(m.group(1)) * 1e3
            peak_memusage = max(memusage, peak_memusage)

        time.sleep(0.01)

    process.wait()

    duration = time.time() - start

    if process.returncode != 0:
        raise AssertionError("Running failed:\n%s" % code)

    return duration, peak_memusage
def run_monitored(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : float or int
        Peak memory usage (rough estimate only) in bytes
    """
    # Prefer the exact rusage-based measurement where os.wait4 exists
    # (Unix); otherwise fall back to polling /proc (Linux only).
    monitor = run_monitored_wait4 if hasattr(os, 'wait4') else run_monitored_proc
    return monitor(code)
def get_mem_info():
    """Return total and currently-available system memory, in bytes."""
    import psutil

    vm = psutil.virtual_memory()
    return {
        "memtotal": vm.total,
        "memavailable": vm.available,
    }
def set_mem_rlimit(max_mem=None):
    """
    Set address space rlimit
    """
    import resource

    if max_mem is None:
        # Default: cap the process at 70% of total system memory.
        mem_info = get_mem_info()
        max_mem = int(mem_info['memtotal'] * 0.7)

    soft, hard = resource.getrlimit(resource.RLIMIT_AS)
    if soft > 0:
        # Never raise an already-configured soft limit.
        max_mem = min(max_mem, soft)

    try:
        resource.setrlimit(resource.RLIMIT_AS, (max_mem, hard))
    except ValueError:
        # on macOS may raise: current limit exceeds maximum limit
        pass
def with_attributes(**attrs):
    """Decorator factory: attach the given keyword attributes to a function."""
    def decorator(func):
        for name, value in attrs.items():
            setattr(func, name, value)
        return func
    return decorator
class safe_import:
    """Context manager that optionally swallows ImportError.

    A failed import sets ``self.error`` and is suppressed unless the
    SCIPY_ALLOW_BENCH_IMPORT_ERRORS environment variable is '0'/'false'.
    Non-import exceptions always propagate.
    """

    def __enter__(self):
        self.error = False
        return self

    def __exit__(self, type_, value, traceback):
        if type_ is None:
            return None
        self.error = True
        allow = os.getenv('SCIPY_ALLOW_BENCH_IMPORT_ERRORS', '1').lower()
        return allow not in ('0', 'false') and issubclass(type_, ImportError)
| 5,643
| 23.754386
| 76
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_linalg_svds.py
|
import os
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.linalg import svds
class BenchSVDS(Benchmark):
    # Benchmark SVD using the MatrixMarket test matrices recommended by the
    # author of PROPACK at http://sun.stanford.edu/~rmunk/PROPACK/
    params = [
        [25],
        ["abb313", "illc1033", "illc1850", "qh1484", "rbs480a", "tols4000",
         "well1033", "well1850", "west0479", "west2021"],
        ['arpack', 'lobpcg', 'propack']
    ]
    param_names = ['k', 'problem', 'solver']

    def setup(self, k, problem, solver):
        # Matrices are stored pickled in one .npz bundle next to this file.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        datafile = os.path.join(dir_path, "svds_benchmark_files",
                                "svds_benchmark_files.npz")
        matrices = np.load(datafile, allow_pickle=True)
        # [()] unwraps the 0-d object array produced by np.load.
        self.A = matrices[problem][()]

    def time_svds(self, k, problem, solver):
        # consider k = int(np.min(self.A.shape) * k)
        np.random.seed(0)
        svds(self.A, k=k, solver=solver)
| 1,053
| 33
| 75
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/optimize_qap.py
|
import numpy as np
from .common import Benchmark, safe_import
import os
with safe_import():
from scipy.optimize import quadratic_assignment
# XXX this should probably have an is_xslow with selected tests.
# Even with this, it takes ~30 seconds to collect the ones to run
# (even if they will all be skipped in the `setup` function).
class QuadraticAssignment(Benchmark):
    """Run QAPLIB problems and track relative gap to the known optimum."""
    methods = ['faq', '2opt']
    # QAPLIB instance names; the data/solutions live in qapdata/*.npz.
    probs = ["bur26a", "bur26b", "bur26c", "bur26d", "bur26e", "bur26f",
             "bur26g", "bur26h", "chr12a", "chr12b", "chr12c", "chr15a",
             "chr15b", "chr15c", "chr18a", "chr18b", "chr20a", "chr20b",
             "chr20c", "chr22a", "chr22b", "chr25a",
             "els19",
             "esc16a", "esc16b", "esc16c", "esc16d", "esc16e", "esc16g",
             "esc16h", "esc16i", "esc16j", "esc32e", "esc32g", "esc128",
             "had12", "had14", "had16", "had18", "had20", "kra30a",
             "kra30b", "kra32",
             "lipa20a", "lipa20b", "lipa30a", "lipa30b", "lipa40a", "lipa40b",
             "lipa50a", "lipa50b", "lipa60a", "lipa60b", "lipa70a", "lipa70b",
             "lipa80a", "lipa90a", "lipa90b",
             "nug12", "nug14", "nug16a", "nug16b", "nug17", "nug18", "nug20",
             "nug21", "nug22", "nug24", "nug25", "nug27", "nug28", "nug30",
             "rou12", "rou15", "rou20",
             "scr12", "scr15", "scr20",
             "sko42", "sko49", "sko56", "sko64", "sko72", "sko81", "sko90",
             "sko100a", "sko100b", "sko100c", "sko100d", "sko100e", "sko100f",
             "ste36b", "ste36c",
             "tai12a", "tai12b", "tai15a", "tai15b", "tai17a", "tai20a",
             "tai20b", "tai25a", "tai25b", "tai30a", "tai30b", "tai35a",
             "tai40a", "tai40b", "tai50a", "tai50b", "tai60a", "tai60b",
             "tai64c", "tai80a", "tai100a", "tai100b", "tai150b", "tai256c",
             "tho30", "tho40", "tho150", "wil50", "wil100"]
    params = [methods, probs]
    param_names = ['Method', 'QAP Problem']

    def setup(self, method, qap_prob):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        datafile = np.load(os.path.join(dir_path, "qapdata/qap_probs.npz"),
                           allow_pickle=True)
        slnfile = np.load(os.path.join(dir_path, "qapdata/qap_sols.npz"),
                          allow_pickle=True)
        # Each entry stacks the (A, B) cost/distance matrices.
        self.A = datafile[qap_prob][0]
        self.B = datafile[qap_prob][1]
        self.opt_solution = slnfile[qap_prob]
        self.method = method

    def time_evaluation(self, method, qap_prob):
        quadratic_assignment(self.A, self.B, self.method)

    def track_score(self, method, qap_prob):
        # Fractional gap between the found score and the known optimum.
        res = quadratic_assignment(self.A, self.B, self.method)
        score = int(res['fun'])
        percent_diff = (score - self.opt_solution) / self.opt_solution
        return percent_diff
| 2,852
| 45.016129
| 78
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_csgraph_dijkstra.py
|
"""benchmarks for the scipy.sparse.csgraph module"""
import numpy as np
import scipy.sparse
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.csgraph import dijkstra
class Dijkstra(Benchmark):
    """Time multi-source Dijkstra on random and hub-and-spoke graphs."""
    params = [
        [30, 300, 900],
        [True, False],
        ['random', 'star']
    ]
    param_names = ['n', 'min_only', 'format']

    def setup(self, n, min_only, format):
        rng = np.random.default_rng(1234)
        if format == 'random':
            # make a random connectivity matrix
            data = scipy.sparse.rand(n, n, density=0.2, format='csc',
                                     random_state=42, dtype=np.bool_)
            data.setdiag(np.zeros(n, dtype=np.bool_))
            self.data = data
        elif format == 'star':
            # Star graph: vertex 0 linked to every other vertex i+1 in both
            # directions, with edge weight i+1.
            rows = [0 for i in range(n - 1)] + [i + 1 for i in range(n - 1)]
            cols = [i + 1 for i in range(n - 1)] + [0 for i in range(n - 1)]
            weights = [i + 1 for i in range(n - 1)] * 2
            self.data = scipy.sparse.csr_matrix((weights, (rows, cols)),
                                                shape=(n, n))
        # choose some random vertices (10% of n) as sources
        v = np.arange(n)
        rng.shuffle(v)
        self.indices = v[:int(n*.1)]

    def time_dijkstra_multi(self, n, min_only, format):
        dijkstra(self.data,
                 directed=False,
                 indices=self.indices,
                 min_only=min_only)
| 1,452
| 32.790698
| 76
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/__init__.py
|
import numpy as np
import random

# Seed the global RNGs once at package import so benchmark inputs that
# rely on the global state are reproducible across runs.
np.random.seed(1234)
random.seed(1234)
| 73
| 11.333333
| 20
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/spatial.py
|
import numpy as np
from .common import Benchmark, LimitedParamBenchmark, safe_import
with safe_import():
from scipy.spatial import cKDTree, KDTree
with safe_import():
from scipy.spatial import distance
with safe_import():
from scipy.spatial import ConvexHull, Voronoi
with safe_import():
from scipy.spatial import SphericalVoronoi
with safe_import():
from scipy.spatial import geometric_slerp
with safe_import():
from scipy.spatial.transform import Rotation
class Build(Benchmark):
    """Time kd-tree construction on a two-cluster Gaussian mixture."""
    params = [
        [(3,10000,1000), (8,10000,1000), (16,10000,1000)],
        ['KDTree', 'cKDTree'],
    ]
    param_names = ['(m, n, r)', 'class']

    def setup(self, mnr, cls_name):
        self.cls = KDTree if cls_name == 'KDTree' else cKDTree
        m, n, r = mnr
        rng = np.random.default_rng(1234)
        # Two Gaussian clusters, the second offset by the all-ones vector.
        self.data = np.concatenate((rng.standard_normal((n//2,m)),
                                    rng.standard_normal((n-n//2,m))+np.ones(m)))
        self.queries = np.concatenate((rng.standard_normal((r//2,m)),
                                       rng.standard_normal((r-r//2,m))+np.ones(m)))

    def time_build(self, mnr, cls_name):
        """
        Constructing kd-tree
        =======================
        dim | # points |  time
        """
        m, n, r = mnr
        # NOTE(review): 'cKDTree_flat' is not among this class's params, so
        # the first branch looks unreachable here — confirm before removing.
        if cls_name == 'cKDTree_flat':
            self.T = self.cls(self.data, leafsize=n)
        else:
            self.cls(self.data)
class PresortedDataSetup(Benchmark):
    """Shared setup: build a cKDTree from random or pre-sorted points,
    with or without a balanced tree."""
    params = [
        [(3, 10 ** 4, 1000), (8, 10 ** 4, 1000), (16, 10 ** 4, 1000)],
        [True, False],
        ['random', 'sorted'],
        [0.5]
    ]
    param_names = ['(m, n, r)', 'balanced', 'order', 'radius']

    def setup(self, mnr, balanced, order, radius):
        m, n, r = mnr
        rng = np.random.default_rng(1234)
        # 'sorted' repeats a strictly decreasing column across all m dims.
        self.data = {
            'random': rng.uniform(size=(n, m)),
            'sorted': np.repeat(np.arange(n, 0, -1)[:, np.newaxis],
                                m,
                                axis=1) / n
        }
        self.queries = rng.uniform(size=(r, m))
        self.T = cKDTree(self.data.get(order), balanced_tree=balanced)
class BuildUnbalanced(PresortedDataSetup):
    # Drops the trailing 'radius' parameter: only construction is timed.
    params = PresortedDataSetup.params[:-1]
    param_names = PresortedDataSetup.param_names[:-1]

    def setup(self, *args):
        super().setup(*args, None)

    def time_build(self, mnr, balanced, order):
        cKDTree(self.data.get(order), balanced_tree=balanced)
class QueryUnbalanced(PresortedDataSetup):
    # Drops the trailing 'radius' parameter: only nearest-neighbor queries.
    params = PresortedDataSetup.params[:-1]
    param_names = PresortedDataSetup.param_names[:-1]

    def setup(self, *args):
        super().setup(*args, None)

    def time_query(self, mnr, balanced, order):
        self.T.query(self.queries)
class RadiusUnbalanced(PresortedDataSetup):
    # Radius queries are slower, so use much smaller point sets.
    params = PresortedDataSetup.params[:]
    params[0] = [(3, 1000, 30), (8, 1000, 30), (16, 1000, 30)]

    def time_query_pairs(self, mnr, balanced, order, radius):
        self.T.query_pairs(radius)

    def time_query_ball_point(self, mnr, balanced, order, radius):
        self.T.query_ball_point(self.queries, radius)
# Shared parameter grids for the kd-tree query benchmarks below.
LEAF_SIZES = [8, 128]
BOX_SIZES = [None, 0.0, 1.0]
class Query(LimitedParamBenchmark):
    """Time cKDTree.query over norms, box sizes and leaf sizes
    (pseudo-randomly limited subset of combinations)."""
    params = [
        [(3,10000,1000), (8,10000,1000), (16,10000,1000)],
        [1, 2, np.inf],
        BOX_SIZES, LEAF_SIZES,
    ]
    param_names = ['(m, n, r)', 'p', 'boxsize', 'leafsize']
    num_param_combinations = 21

    # Declared @staticmethod but takes an explicit `self`: it is shared by
    # other benchmark classes which call Query.do_setup(self, ...) to
    # populate *their* instance attributes.
    @staticmethod
    def do_setup(self, mnr, p, boxsize, leafsize):
        m, n, r = mnr
        rng = np.random.default_rng(1234)
        self.data = rng.uniform(size=(n, m))
        self.queries = rng.uniform(size=(r, m))
        self.T = cKDTree(self.data, leafsize=leafsize, boxsize=boxsize)

    def setup(self, mnr, p, boxsize, leafsize):
        LimitedParamBenchmark.setup(self, mnr, p, boxsize, leafsize)
        Query.do_setup(self, mnr, p, boxsize, leafsize)

    def time_query(self, mnr, p, boxsize, leafsize):
        """
        Querying kd-tree
        dim | # points | # queries |  KDTree  | cKDTree | flat cKDTree
        """
        self.T.query(self.queries, p=p)

    # Retain old benchmark results (remove this if changing the benchmark)
    time_query.version = "327bc0627d5387347e9cdcf4c52a550c813bb80a859eeb0f3e5bfe6650a8a1db"
class Radius(LimitedParamBenchmark):
    """Time radius-based kd-tree queries (ball point and pairs)."""
    params = [
        [(3,10000,1000)],
        [1, 2, np.inf],
        [0.2, 0.5],
        BOX_SIZES, LEAF_SIZES,
    ]
    param_names = ['(m, n, r)', 'p', 'probe radius', 'boxsize', 'leafsize']
    num_param_combinations = 7

    def __init__(self):
        # asv instantiates the class once; rewire per-method params and
        # setup hooks here because they differ between the timed methods.
        self.time_query_pairs.__func__.params = list(self.params)
        self.time_query_pairs.__func__.params[0] = [(3,1000,30),
                                                    (8,1000,30),
                                                    (16,1000,30)]
        self.time_query_ball_point.__func__.setup = self.setup_query_ball_point
        self.time_query_ball_point_nosort.__func__.setup = self.setup_query_ball_point
        self.time_query_pairs.__func__.setup = self.setup_query_pairs

    def setup(self, *args):
        # Intentionally empty: per-method setup hooks are installed above.
        pass

    def setup_query_ball_point(self, mnr, p, probe_radius, boxsize, leafsize):
        LimitedParamBenchmark.setup(self, mnr, p, probe_radius, boxsize, leafsize,
                                    param_seed=3)
        Query.do_setup(self, mnr, p, boxsize, leafsize)

    def setup_query_pairs(self, mnr, p, probe_radius, boxsize, leafsize):
        # query_pairs is fast enough so we can run all parameter combinations
        Query.do_setup(self, mnr, p, boxsize, leafsize)

    def time_query_ball_point(self, mnr, p, probe_radius, boxsize, leafsize):
        self.T.query_ball_point(self.queries, probe_radius, p=p)

    def time_query_ball_point_nosort(self, mnr, p, probe_radius, boxsize, leafsize):
        self.T.query_ball_point(self.queries, probe_radius, p=p,
                                return_sorted=False)

    def time_query_pairs(self, mnr, p, probe_radius, boxsize, leafsize):
        self.T.query_pairs(probe_radius, p=p)

    # Retain old benchmark results (remove this if changing the benchmark)
    time_query_ball_point.version = "e0c2074b35db7e5fca01a43b0fba8ab33a15ed73d8573871ea6feb57b3df4168"
    time_query_pairs.version = "cf669f7d619e81e4a09b28bb3fceaefbdd316d30faf01524ab33d41661a53f56"
class Neighbors(LimitedParamBenchmark):
    """Time two-tree neighbor operations (sparse distance matrix and
    neighbor counting, optionally weighted)."""
    params = [
        [(3,1000,1000),
         (8,1000,1000),
         (16,1000,1000)],
        [1, 2, np.inf],
        [0.2, 0.5],
        BOX_SIZES, LEAF_SIZES,
        ['cKDTree', 'cKDTree_weighted'],
    ]
    param_names = ['(m, n1, n2)', 'p', 'probe radius', 'boxsize', 'leafsize', 'cls']
    num_param_combinations = 17

    def setup(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):
        LimitedParamBenchmark.setup(self, mn1n2, p, probe_radius, boxsize, leafsize, cls)
        m, n1, n2 = mn1n2
        self.data1 = np.random.uniform(size=(n1, m))
        self.data2 = np.random.uniform(size=(n2, m))
        # Unit weights so weighted and unweighted counts are comparable.
        self.w1 = np.ones(n1)
        self.w2 = np.ones(n2)
        self.T1 = cKDTree(self.data1, boxsize=boxsize, leafsize=leafsize)
        self.T2 = cKDTree(self.data2, boxsize=boxsize, leafsize=leafsize)

    def time_sparse_distance_matrix(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):
        self.T1.sparse_distance_matrix(self.T2, probe_radius, p=p)

    def time_count_neighbors(self, mn1n2, p, probe_radius, boxsize, leafsize, cls):
        """
        Count neighbors kd-tree
        dim | # points T1 | # points T2 | p | probe radius |  BoxSize | LeafSize | cls
        """
        if cls != 'cKDTree_weighted':
            self.T1.count_neighbors(self.T2, probe_radius, p=p)
        else:
            self.T1.count_neighbors(self.T2, probe_radius, weights=(self.w1, self.w2), p=p)

    # Retain old benchmark results (remove this if changing the benchmark)
    time_sparse_distance_matrix.version = "9aa921dce6da78394ab29d949be27953484613dcf9c9632c01ae3973d4b29596"
    time_count_neighbors.version = "830287f1cf51fa6ba21854a60b03b2a6c70b2f2485c3cdcfb19a360e0a7e2ca2"
class CNeighbors(Benchmark):
    """Compare count_neighbors on very deep (leafsize=1) versus shallow
    (leafsize=8) trees for a range of radius-vector lengths."""
    params = [
        [
            (2,1000,1000),
            (8,1000,1000),
            (16,1000,1000)
        ],
        [2, 10, 100, 400, 1000],
    ]
    param_names = ['(m, n1, n2)', 'Nr']

    def setup(self, mn1n2, Nr):
        m, n1, n2 = mn1n2

        data1 = np.random.uniform(size=(n1, m))
        data2 = np.random.uniform(size=(n2, m))
        self.w1 = np.ones(len(data1))
        self.w2 = np.ones(len(data2))

        # leafsize=1 forces a maximally deep tree; leafsize=8 a shallow one.
        self.T1d = cKDTree(data1, leafsize=1)
        self.T2d = cKDTree(data2, leafsize=1)
        self.T1s = cKDTree(data1, leafsize=8)
        self.T2s = cKDTree(data2, leafsize=8)
        self.r = np.linspace(0, 0.5, Nr)

    def time_count_neighbors_deep(self, mn1n2, Nr):
        """
        Count neighbors for a very deep kd-tree
        dim | # points T1 | # points T2 | Nr
        """
        self.T1d.count_neighbors(self.T2d, self.r)

    def time_count_neighbors_shallow(self, mn1n2, Nr):
        """
        Count neighbors for a shallow kd-tree
        dim | # points T1 | # points T2 | Nr
        """
        self.T1s.count_neighbors(self.T2s, self.r)
def generate_spherical_points(num_points):
    """Return ``num_points`` points distributed uniformly on the unit sphere.

    Normalizes standard-normal samples (fixed seed for reproducibility);
    see: https://stackoverflow.com/a/23785326
    """
    rng = np.random.default_rng(123)
    samples = rng.normal(size=(num_points, 3))
    radii = np.linalg.norm(samples, axis=1)
    return samples / radii[:, np.newaxis]
class SphericalVor(Benchmark):
    """Time the SphericalVoronoi construction itself."""
    params = [10, 100, 1000, 5000, 10000]
    param_names = ['num_points']

    def setup(self, num_points):
        self.points = generate_spherical_points(num_points)

    def time_spherical_voronoi_calculation(self, num_points):
        """Perform spherical Voronoi calculation, but not the sorting of
        vertices in the Voronoi polygons.
        """
        SphericalVoronoi(self.points, radius=1, center=np.zeros(3))
class SphericalVorSort(Benchmark):
    """Time only the vertex-sorting step of SphericalVoronoi."""
    params = [10, 100, 1000, 5000, 10000]
    param_names = ['num_points']

    def setup(self, num_points):
        # Build the diagram up-front so only sorting is timed.
        self.points = generate_spherical_points(num_points)
        self.sv = SphericalVoronoi(self.points, radius=1,
                                   center=np.zeros(3))

    def time_spherical_polygon_vertex_sorting(self, num_points):
        """Time the vertex sorting operation in the Spherical Voronoi
        code.
        """
        self.sv.sort_vertices_of_regions()
class SphericalVorAreas(Benchmark):
    """Time only the region-area computation of SphericalVoronoi."""
    params = [10, 100, 1000, 5000, 10000]
    param_names = ['num_points']

    def setup(self, num_points):
        # Build the diagram up-front so only the area step is timed.
        self.points = generate_spherical_points(num_points)
        self.sv = SphericalVoronoi(self.points, radius=1,
                                   center=np.zeros(3))

    def time_spherical_polygon_area_calculation(self, num_points):
        """Time the area calculation in the Spherical Voronoi code."""
        self.sv.calculate_areas()
class Xdist(Benchmark):
    """Time cdist/pdist across input sizes and all supported metrics."""
    params = ([10, 100, 1000],
              ['euclidean', 'minkowski', 'cityblock',
               'seuclidean', 'sqeuclidean', 'cosine', 'correlation',
               'hamming', 'jaccard', 'jensenshannon', 'chebyshev', 'canberra',
               'braycurtis', 'mahalanobis', 'yule', 'dice', 'kulczynski1',
               'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath',
               'wminkowski', 'minkowski-P3'])
    param_names = ['num_points', 'metric']

    def setup(self, num_points, metric):
        rng = np.random.default_rng(123)
        self.points = rng.random((num_points, 3))
        self.metric = metric
        if metric == 'minkowski-P3':
            # p=2 is just the euclidean metric, try another p value as well
            self.kwargs = {'p': 3.0}
            self.metric = 'minkowski'
        elif metric == 'wminkowski':
            # use an equal weight vector since weights are required
            self.kwargs = {'w': np.ones(3)}
        else:
            self.kwargs = {}

    def time_cdist(self, num_points, metric):
        """Time scipy.spatial.distance.cdist over a range of input data
        sizes and metrics.
        """
        distance.cdist(self.points, self.points, self.metric, **self.kwargs)

    def time_pdist(self, num_points, metric):
        """Time scipy.spatial.distance.pdist over a range of input data
        sizes and metrics.
        """
        distance.pdist(self.points, self.metric, **self.kwargs)
class XdistWeighted(Benchmark):
    """Time cdist/pdist with an explicit weight vector for every metric
    that supports weighting."""
    params = (
        [10, 20, 100],
        ['euclidean', 'minkowski', 'cityblock', 'sqeuclidean', 'cosine',
         'correlation', 'hamming', 'jaccard', 'chebyshev', 'canberra',
         'braycurtis', 'yule', 'dice', 'kulczynski1', 'rogerstanimoto',
         'russellrao', 'sokalmichener', 'sokalsneath', 'minkowski-P3'])
    param_names = ['num_points', 'metric']

    def setup(self, num_points, metric):
        rng = np.random.default_rng(123)
        self.points = rng.random((num_points, 3))
        self.metric = metric
        if metric == 'minkowski-P3':
            # p=2 is just the euclidean metric, try another p value as well
            self.kwargs = {'p': 3.0}
            self.metric = 'minkowski'
        else:
            self.kwargs = {}
        # Equal weights: exercises the weighted code path without changing
        # the distances themselves.
        self.weights = np.ones(3)

    def time_cdist(self, num_points, metric):
        """Time scipy.spatial.distance.cdist for weighted distance metrics."""
        distance.cdist(self.points, self.points, self.metric, w=self.weights,
                       **self.kwargs)

    def time_pdist(self, num_points, metric):
        """Time scipy.spatial.distance.pdist for weighted distance metrics."""
        distance.pdist(self.points, self.metric, w=self.weights, **self.kwargs)
class ConvexHullBench(Benchmark):
    """Time 3-D convex hull construction, incremental and one-shot."""
    params = ([10, 100, 1000, 5000], [True, False])
    param_names = ['num_points', 'incremental']

    def setup(self, num_points, incremental):
        rng = np.random.default_rng(123)
        self.points = rng.random((num_points, 3))

    def time_convex_hull(self, num_points, incremental):
        """Time scipy.spatial.ConvexHull over a range of input data sizes
        and settings.
        """
        # `incremental` is passed positionally as the second argument.
        ConvexHull(self.points, incremental)
class VoronoiBench(Benchmark):
    """Time conventional (qhull) Voronoi diagram construction in 3-D."""
    params = ([10, 100, 1000, 5000, 10000], [False, True])
    param_names = ['num_points', 'furthest_site']

    def setup(self, num_points, furthest_site):
        rng = np.random.default_rng(123)
        self.points = rng.random((num_points, 3))

    def time_voronoi_calculation(self, num_points, furthest_site):
        """Time conventional Voronoi diagram calculation."""
        Voronoi(self.points, furthest_site=furthest_site)
class Hausdorff(Benchmark):
    """Time the directed Hausdorff distance between two 3-D point sets."""
    params = [10, 100, 1000]
    param_names = ['num_points']

    def setup(self, num_points):
        rng = np.random.default_rng(123)
        self.points1 = rng.random((num_points, 3))
        self.points2 = rng.random((num_points, 3))

    def time_directed_hausdorff(self, num_points):
        # time directed_hausdorff code in 3 D
        distance.directed_hausdorff(self.points1, self.points2)
class GeometricSlerpBench(Benchmark):
    """Time spherical linear interpolation between two fixed endpoints."""
    params = [10, 1000, 10000]
    param_names = ['num_points']

    def setup(self, num_points):
        points = generate_spherical_points(50)
        # any two points from the random spherical points
        # will suffice for the interpolation bounds:
        self.start = points[0]
        self.end = points[-1]
        self.t = np.linspace(0, 1, num_points)

    def time_geometric_slerp_3d(self, num_points):
        # time geometric_slerp() for 3D interpolation
        geometric_slerp(start=self.start,
                        end=self.end,
                        t=self.t)
class RotationBench(Benchmark):
    """Time round-trip conversions and composition of random Rotations."""
    params = [1, 10, 1000, 10000]
    param_names = ['num_rotations']

    def setup(self, num_rotations):
        rng = np.random.default_rng(1234)
        self.rotations = Rotation.random(num_rotations, random_state=rng)

    def time_matrix_conversion(self, num_rotations):
        '''Time converting rotation from and to matrices'''
        Rotation.from_matrix(self.rotations.as_matrix())

    def time_euler_conversion(self, num_rotations):
        '''Time converting rotation from and to euler angles'''
        Rotation.from_euler("XYZ", self.rotations.as_euler("XYZ"))

    def time_rotvec_conversion(self, num_rotations):
        '''Time converting rotation from and to rotation vectors'''
        Rotation.from_rotvec(self.rotations.as_rotvec())

    def time_mrp_conversion(self, num_rotations):
        '''Time converting rotation from and to Modified Rodrigues Parameters'''
        Rotation.from_mrp(self.rotations.as_mrp())

    def time_mul_inv(self, num_rotations):
        '''Time multiplication and inverse of rotations'''
        self.rotations * self.rotations.inv()
| 16,940
| 34.29375
| 108
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/linalg_logm.py
|
""" Benchmark linalg.logm for various blocksizes.
"""
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg
class Logm(Benchmark):
    """Time scipy.linalg.logm on random matrices of three structures:
    general ('gen'), Hermitian ('her'), and positive definite ('pos')."""
    params = [
        ['float64', 'complex128'],
        [64, 256],
        ['gen', 'her', 'pos']
    ]
    param_names = ['dtype', 'n', 'structure']

    def setup(self, dtype, n, structure):
        n = int(n)
        dtype = np.dtype(dtype)

        A = np.random.rand(n, n)
        if dtype == np.complex128:
            A = A + 1j*np.random.rand(n, n)

        if structure == 'pos':
            # A @ A^H is Hermitian positive (semi-)definite by construction.
            A = A @ A.T.conj()
        elif structure == 'her':
            A = A + A.T.conj()

        self.A = A

    def time_logm(self, dtype, n, structure):
        # disp=False suppresses the accuracy warning printout.
        scipy.linalg.logm(self.A, disp=False)
| 785
| 20.833333
| 49
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/optimize_linprog.py
|
import os
import numpy as np
from numpy.testing import suppress_warnings
from .common import Benchmark, is_xslow, safe_import
with safe_import():
from scipy.optimize import linprog, OptimizeWarning
with safe_import():
from scipy.optimize.tests.test_linprog import lpgen_2d, magic_square
with safe_import():
from scipy.linalg import toeplitz
# (method name, options) pairs passed straight through to linprog.
methods = [("highs-ipm", {}),
           ("highs-ds", {})]

# Feasible Netlib LP test problems shipped as .npz files alongside the code.
problems = ['25FV47', '80BAU3B', 'ADLITTLE', 'AFIRO', 'AGG', 'AGG2', 'AGG3',
            'BANDM', 'BEACONFD', 'BLEND', 'BNL1', 'BNL2', 'BORE3D', 'BRANDY',
            'CAPRI', 'CYCLE', 'CZPROB', 'D2Q06C', 'D6CUBE', 'DEGEN2', 'DEGEN3',
            'DFL001', 'E226', 'ETAMACRO', 'FFFFF800', 'FINNIS', 'FIT1D',
            'FIT1P', 'FIT2D', 'FIT2P', 'GANGES', 'GFRD-PNC', 'GREENBEA',
            'GREENBEB', 'GROW15', 'GROW22', 'GROW7', 'ISRAEL', 'KB2', 'LOTFI',
            'MAROS', 'MAROS-R7', 'MODSZK1', 'PEROLD', 'PILOT', 'PILOT4',
            'PILOT87', 'PILOT-JA', 'PILOTNOV', 'PILOT-WE', 'QAP8', 'QAP12',
            'QAP15', 'RECIPE', 'SC105', 'SC205', 'SC50A', 'SC50B', 'SCAGR25',
            'SCAGR7', 'SCFXM1', 'SCFXM2', 'SCFXM3', 'SCORPION', 'SCRS8',
            'SCSD1', 'SCSD6', 'SCSD8', 'SCTAP1', 'SCTAP2', 'SCTAP3', 'SHARE1B',
            'SHARE2B', 'SHELL', 'SHIP04L', 'SHIP04S', 'SHIP08L', 'SHIP08S',
            'SHIP12L', 'SHIP12S', 'SIERRA', 'STAIR', 'STANDATA', 'STANDMPS',
            'STOCFOR1', 'STOCFOR2', 'STOCFOR3', 'TRUSS', 'TUFF', 'VTP-BASE',
            'WOOD1P', 'WOODW']

# Netlib problems known to be infeasible (used to check status reporting).
infeasible_problems = ['bgdbg1', 'bgetam', 'bgindy', 'bgprtr', 'box1',
                       'ceria3d', 'chemcom', 'cplex1', 'cplex2', 'ex72a',
                       'ex73a', 'forest6', 'galenet', 'gosh', 'gran',
                       'itest2', 'itest6', 'klein1', 'klein2', 'klein3',
                       'mondou2', 'pang', 'pilot4i', 'qual', 'reactor',
                       'refinery', 'vol1', 'woodinfe']

# Unless SCIPY_XSLOW is set, run only a quick subset of each suite.
if not is_xslow():
    enabled_problems = ['ADLITTLE', 'AFIRO', 'BLEND', 'BEACONFD', 'GROW7',
                        'LOTFI', 'SC105', 'SCTAP1', 'SHARE2B', 'STOCFOR1']
    enabled_infeasible_problems = ['bgdbg1', 'bgprtr', 'box1', 'chemcom',
                                   'cplex2', 'ex72a', 'ex73a', 'forest6',
                                   'galenet', 'itest2', 'itest6', 'klein1',
                                   'refinery', 'woodinfe']
else:
    enabled_problems = problems
    enabled_infeasible_problems = infeasible_problems
def klee_minty(D):
    """Build the D-dimensional Klee-Minty LP in ``A_ub @ x <= b_ub`` form.

    Returns ``(c, A_ub, b_ub, xf, obj)`` where ``xf`` is the known optimal
    point and ``obj = c @ xf`` the corresponding objective value.
    """
    # Lower-triangular constraint matrix with first column [1, 4, 8, ...].
    first_col = np.array([2**(i + 1) if i > 0 else 1 for i in range(D)])
    first_row = np.zeros(D)
    first_row[0] = 1
    A_ub = toeplitz(first_col, first_row)

    b_ub = np.array([5**(i + 1) for i in range(D)])
    c = -np.array([2**(D - i - 1) for i in range(D)])

    # The optimum sits at the single vertex (0, ..., 0, 5**D).
    best_x = np.zeros(D)
    best_x[-1] = 5**D
    return c, A_ub, b_ub, best_x, c @ best_x
class MagicSquare(Benchmark):
    """Solve the magic-square LP and track error vs the known objective."""
    # (dimension, known optimal objective) pairs.
    solutions = [(3, 1.7305505947214375), (4, 1.5485271031586025),
                 (5, 1.807494583582637), (6, 1.747266446858304)]

    params = [methods, solutions]
    param_names = ['method', '(dimensions, objective)']

    def setup(self, meth, prob):
        if not is_xslow():
            # Larger squares are too slow for the default benchmark run.
            if prob[0] > 4:
                raise NotImplementedError("skipped")

        dims, obj = prob
        self.A_eq, self.b_eq, self.c, numbers, _ = magic_square(dims)
        self.fun = None

    def time_magic_square(self, meth, prob):
        method, options = meth
        with suppress_warnings() as sup:
            sup.filter(OptimizeWarning, "A_eq does not appear")
            res = linprog(c=self.c, A_eq=self.A_eq, b_eq=self.b_eq,
                          bounds=(0, 1), method=method, options=options)
            self.fun = res.fun

    def track_magic_square(self, meth, prob):
        dims, obj = prob
        # track_* may run without time_* having populated self.fun.
        if self.fun is None:
            self.time_magic_square(meth, prob)
        self.abs_error = np.abs(self.fun - obj)
        self.rel_error = np.abs((self.fun - obj)/obj)
        return min(self.abs_error, self.rel_error)
class KleeMinty(Benchmark):
    """Solve Klee-Minty cubes, a classic worst case for simplex pivoting."""
    params = [
        methods,
        [3, 6, 9]
    ]
    param_names = ['method', 'dimensions']

    def setup(self, meth, dims):
        self.c, self.A_ub, self.b_ub, self.xf, self.obj = klee_minty(dims)
        self.fun = None

    def time_klee_minty(self, meth, dims):
        method, options = meth
        res = linprog(c=self.c, A_ub=self.A_ub, b_ub=self.b_ub,
                      method=method, options=options)
        self.fun = res.fun
        self.x = res.x

    def track_klee_minty(self, meth, prob):
        # track_* may run without time_* having populated self.fun.
        if self.fun is None:
            self.time_klee_minty(meth, prob)
        self.abs_error = np.abs(self.fun - self.obj)
        self.rel_error = np.abs((self.fun - self.obj)/self.obj)
        return min(self.abs_error, self.rel_error)
class LpGen(Benchmark):
    """Time linprog on randomly generated m-by-n problems from lpgen_2d."""
    params = [
        methods,
        range(20, 100, 20),
        range(20, 100, 20)
    ]
    param_names = ['method', 'm', 'n']

    def setup(self, meth, m, n):
        self.A, self.b, self.c = lpgen_2d(m, n)

    def time_lpgen(self, meth, m, n):
        method, options = meth
        with suppress_warnings() as sup:
            # Ill-conditioning warnings are expected for some instances.
            sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll-conditioned")
            linprog(c=self.c, A_ub=self.A, b_ub=self.b,
                    method=method, options=options)
class Netlib(Benchmark):
    """Solve feasible Netlib problems; track error vs the known optimum."""
    params = [
        methods,
        problems
    ]
    param_names = ['method', 'problems']

    def setup(self, meth, prob):
        # Only a small subset runs unless SCIPY_XSLOW is enabled.
        if prob not in enabled_problems:
            raise NotImplementedError("skipped")

        dir_path = os.path.dirname(os.path.realpath(__file__))
        datafile = os.path.join(dir_path, "linprog_benchmark_files",
                                prob + ".npz")
        data = np.load(datafile, allow_pickle=True)
        self.c = data["c"]
        self.A_eq = data["A_eq"]
        self.A_ub = data["A_ub"]
        self.b_ub = data["b_ub"]
        self.b_eq = data["b_eq"]
        self.bounds = np.squeeze(data["bounds"])
        # Known optimal objective value, stored as a 0-d/1-element array.
        self.obj = float(data["obj"].flatten()[0])
        self.fun = None

    def time_netlib(self, meth, prob):
        method, options = meth
        res = linprog(c=self.c,
                      A_ub=self.A_ub,
                      b_ub=self.b_ub,
                      A_eq=self.A_eq,
                      b_eq=self.b_eq,
                      bounds=self.bounds,
                      method=method,
                      options=options)
        self.fun = res.fun

    def track_netlib(self, meth, prob):
        # track_* may run without time_* having populated self.fun.
        if self.fun is None:
            self.time_netlib(meth, prob)
        self.abs_error = np.abs(self.fun - self.obj)
        self.rel_error = np.abs((self.fun - self.obj)/self.obj)
        return min(self.abs_error, self.rel_error)
class Netlib_infeasible(Benchmark):
    """Benchmark infeasibility detection on the infeasible Netlib problems."""

    params = [
        methods,
        infeasible_problems,
    ]
    param_names = ['method', 'problems']

    def setup(self, meth, prob):
        if prob not in enabled_infeasible_problems:
            raise NotImplementedError("skipped")
        here = os.path.dirname(os.path.realpath(__file__))
        archive = os.path.join(here, "linprog_benchmark_files",
                               "infeasible", prob + ".npz")
        data = np.load(archive, allow_pickle=True)
        self.c = data["c"]
        self.A_eq = data["A_eq"]
        self.A_ub = data["A_ub"]
        self.b_ub = data["b_ub"]
        self.b_eq = data["b_eq"]
        self.bounds = np.squeeze(data["bounds"])
        self.status = None

    def time_netlib_infeasible(self, meth, prob):
        method, options = meth
        result = linprog(c=self.c,
                         A_ub=self.A_ub,
                         b_ub=self.b_ub,
                         A_eq=self.A_eq,
                         b_eq=self.b_eq,
                         bounds=self.bounds,
                         method=method,
                         options=options)
        self.status = result.status

    def track_netlib_infeasible(self, meth, prob):
        # linprog status code; 2 means infeasibility was detected.
        if self.status is None:
            self.time_netlib_infeasible(meth, prob)
        return self.status
| 8,116
| 34.291304
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/fftpack_pseudo_diffs.py
|
""" Benchmark functions for fftpack.pseudo_diffs module
"""
from numpy import arange, sin, cos, pi, exp, tanh, sign
from .common import Benchmark, safe_import
with safe_import():
from scipy.fftpack import diff, fft, ifft, tilbert, hilbert, shift, fftfreq
def direct_diff(x, k=1, period=None):
    """Reference spectral k-th derivative computed as an explicit FFT
    multiplier, used for comparison against fftpack.diff."""
    fx = fft(x)
    n = len(fx)
    if period is None:
        period = 2 * pi
    freqs = fftfreq(n) * 2j * pi / period * n
    if k < 0:
        mult = 1 / freqs**k
        mult[0] = 0.0  # zero out the (divided-by-zero) DC component
    else:
        mult = freqs**k
        if n > 2000:
            # Suppress mid-band frequencies for long signals, matching the
            # original reference implementation.
            mult[250:n - 250] = 0.0
    return ifft(mult * fx).real
def direct_tilbert(x, h=1, period=None):
    """Reference Tilbert transform (kernel sqrt(-1)/tanh(k*h)) for
    comparison against fftpack.tilbert."""
    fx = fft(x)
    n = len(fx)
    if period is None:
        period = 2 * pi
    freqs = fftfreq(n) * h * 2 * pi / period * n
    freqs[0] = 1            # placeholder: avoid tanh(0) division at DC
    kernel = 1j / tanh(freqs)
    kernel[0] = 0j          # the DC component is annihilated
    return ifft(kernel * fx)
def direct_hilbert(x):
    """Reference Hilbert transform (kernel sqrt(-1)*sign(k)) for
    comparison against fftpack.hilbert."""
    fx = fft(x)
    n = len(fx)
    freqs = fftfreq(n) * n
    kernel = 1j * sign(freqs)
    return ifft(kernel * fx)
def direct_shift(x, a, period=None):
    """Reference periodic shift f(x) -> f(x + a) via an FFT phase factor,
    for comparison against fftpack.shift."""
    n = len(x)
    if period is None:
        phase = fftfreq(n) * 1j * n
    else:
        phase = fftfreq(n) * 2j * pi / period * n
    return ifft(fft(x) * exp(phase * a)).real
class Bench(Benchmark):
    """Compare fftpack's pseudo-differential operators against the direct
    FFT-multiplier reference implementations defined above."""

    params = [
        [100, 256, 512, 1000, 1024, 2048, 2048*2, 2048*4],
        ['fft', 'direct'],
    ]
    param_names = ['size', 'type']

    def setup(self, size, type):
        n = int(size)
        grid = arange(n) * 2 * pi / n
        self.a = 1
        if n < 2000:
            self.f = sin(grid) * cos(4 * grid) + exp(sin(3 * grid))
            self.sf = (sin(grid + self.a) * cos(4 * (grid + self.a))
                       + exp(sin(3 * (grid + self.a))))
        else:
            # Cheaper test function for the large problem sizes.
            self.f = sin(grid) * cos(4 * grid)
            self.sf = sin(grid + self.a) * cos(4 * (grid + self.a))

    def time_diff(self, size, soltype):
        impl = diff if soltype == 'fft' else direct_diff
        impl(self.f, 3)

    def time_tilbert(self, size, soltype):
        impl = tilbert if soltype == 'fft' else direct_tilbert
        impl(self.f, 1)

    def time_hilbert(self, size, soltype):
        impl = hilbert if soltype == 'fft' else direct_hilbert
        impl(self.f)

    def time_shift(self, size, soltype):
        impl = shift if soltype == 'fft' else direct_shift
        impl(self.f, self.a)
| 2,237
| 21.836735
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/optimize_zeros.py
|
from math import sqrt, exp, cos, sin
import numpy as np
from .common import Benchmark, safe_import
# Import testing parameters
with safe_import():
from scipy.optimize._tstutils import methods, mstrings, functions, fstrings
from scipy.optimize import newton # newton predates benchmarks
class Zeros(Benchmark):
    """Time each bracketing root finder on the standard test functions."""

    params = [
        fstrings,
        mstrings,
    ]
    param_names = ['test function', 'solver']

    def setup(self, func, meth):
        # Bracket endpoints handed to every solver.
        self.a = .5
        self.b = sqrt(3)
        self.func = functions[fstrings.index(func)]
        self.meth = methods[mstrings.index(meth)]

    def time_zeros(self, func, meth):
        self.meth(self.func, self.a, self.b)
class Newton(Benchmark):
    """Time scipy.optimize.newton in its three scalar modes (secant,
    Newton, Halley) on two smooth test functions."""

    params = [
        ['f1', 'f2'],
        ['newton', 'secant', 'halley'],
    ]
    param_names = ['test function', 'solver']

    def setup(self, func, meth):
        self.x0 = 3
        if func == 'f1':
            f = lambda x: x ** 2 - 2 * x - 1
            df = lambda x: 2 * x - 2
            d2f = lambda x: 2.0 + 0 * x
        else:
            f = lambda x: exp(x) - cos(x)
            df = lambda x: exp(x) + sin(x)
            d2f = lambda x: exp(x) + cos(x)
        self.f = f
        # Derivatives are only supplied to the solvers that use them;
        # passing None selects the secant behaviour in newton().
        self.f_1 = df if meth in ('newton', 'halley') else None
        self.f_2 = d2f if meth == 'halley' else None

    def time_newton(self, func, meth):
        newton(self.f, self.x0, args=(), fprime=self.f_1, fprime2=self.f_2)
class NewtonArray(Benchmark):
    """Compare vectorized newton/secant/halley against an explicit Python
    loop over scalar solves with per-element parameters."""

    params = [['loop', 'array'], ['newton', 'secant', 'halley']]
    param_names = ['vectorization', 'solver']

    def setup(self, vec, meth):
        # Build self.fvec with the uniform signature
        # (f, x0, args, fprime, fprime2) for all six combinations.
        if vec == 'loop':
            if meth == 'newton':
                def solve(f, x0, args, fprime, fprime2):
                    return [newton(f, x, args=(a0, a1) + args[2:],
                                   fprime=fprime)
                            for (x, a0, a1) in zip(x0, args[0], args[1])]
            elif meth == 'halley':
                def solve(f, x0, args, fprime, fprime2):
                    return [newton(f, x, args=(a0, a1) + args[2:],
                                   fprime=fprime, fprime2=fprime2)
                            for (x, a0, a1) in zip(x0, args[0], args[1])]
            else:
                # secant: no derivatives supplied
                def solve(f, x0, args, fprime, fprime2):
                    return [newton(f, x, args=(a0, a1) + args[2:])
                            for (x, a0, a1) in zip(x0, args[0], args[1])]
        else:
            if meth == 'newton':
                def solve(f, x0, args, fprime, fprime2):
                    return newton(f, x0, args=args, fprime=fprime)
            elif meth == 'halley':
                solve = newton
            else:
                def solve(f, x0, args, fprime, fprime2):
                    return newton(f, x0, args=args)
        self.fvec = solve

    def time_array_newton(self, vec, meth):
        # Nonlinear test problem with 10 independent parameter sets
        # (presumably modelled on the array example in the newton docs --
        # confirm there if the constants matter).
        def f(x, *a):
            b = a[0] + x * a[3]
            return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x

        def f_1(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1

        def f_2(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b ** 2

        a0 = np.array([
            5.32725221, 5.48673747, 5.49539973,
            5.36387202, 4.80237316, 1.43764452,
            5.23063958, 5.46094772, 5.50512718,
            5.42046290
        ])
        a1 = (np.sin(range(10)) + 1.0) * 7.0
        args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
        x0 = [7.0] * 10
        self.fvec(f, x0, args=args, fprime=f_1, fprime2=f_2)
| 3,776
| 31.282051
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_csgraph_matching.py
|
import numpy as np
import scipy.sparse
from scipy.spatial.distance import cdist
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.csgraph import maximum_bipartite_matching,\
min_weight_full_bipartite_matching
class MaximumBipartiteMatching(Benchmark):
    """Benchmark maximum_bipartite_matching on random sparse graphs."""

    params = [[5000, 7500, 10000], [0.0001, 0.0005, 0.001]]
    param_names = ['n', 'density']

    def setup(self, n, density):
        # Build a random sparse graph directly from random index pairs;
        # ignoring duplicate entries this way is quite a bit faster than
        # scipy.sparse.rand.
        rng = np.random.default_rng(42)
        edges = rng.integers(0, n, size=(int(n * n * density), 2))
        self.graph = scipy.sparse.csr_matrix(
            (np.ones(len(edges)), (edges[:, 0], edges[:, 1])),
            shape=(n, n))

    def time_maximum_bipartite_matching(self, n, density):
        maximum_bipartite_matching(self.graph)
# For benchmarking min_weight_full_bipartite_matching, we rely on some of
# the classes defined in Burkard, Dell'Amico, Martello -- Assignment Problems,
# 2009, Section 4.10.1.
def random_uniform(shape, rng):
    """Dense matrix of uniform weights drawn from [1, 100), stored as CSR."""
    dense = rng.uniform(1, 100, shape)
    return scipy.sparse.csr_matrix(dense)
def random_uniform_sparse(shape, rng):
    """Sparse matrix (10% fill) of uniform weights, in CSR format."""
    n_rows, n_cols = shape
    return scipy.sparse.random(n_rows, n_cols, density=0.1, format='csr',
                               random_state=rng)
def random_uniform_integer(shape, rng):
    """Dense matrix of integer weights drawn from [1, 1000), as CSR."""
    weights = rng.integers(1, 1000, shape)
    return scipy.sparse.csr_matrix(weights)
def random_geometric(shape, rng):
    """Squared euclidean distances between two random integer point sets."""
    left = rng.integers(1, 1000, size=(shape[0], 2))
    right = rng.integers(1, 1000, size=(shape[1], 2))
    return scipy.sparse.csr_matrix(cdist(left, right, 'sqeuclidean'))
def random_two_cost(shape, rng):
    """Matrix whose entries are drawn from the two-value set {1, 1000000}."""
    costs = rng.choice((1, 1000000), shape)
    return scipy.sparse.csr_matrix(costs)
def machol_wien(shape, rng):
    """Machol--Wien instance with d[i, j] = (i + 1) * (j + 1).

    These instances are much harder than the other generators, so the
    requested size is cut down by a factor of 5.
    """
    n_rows = shape[0] // 5
    n_cols = shape[1] // 5
    costs = np.outer(np.arange(n_rows) + 1, np.arange(n_cols) + 1)
    return scipy.sparse.csr_matrix(costs)
class MinWeightFullBipartiteMatching(Benchmark):
    """Benchmark min_weight_full_bipartite_matching on the instance classes
    of Burkard, Dell'Amico, Martello -- Assignment Problems (2009), 4.10.1."""

    sizes = range(100, 401, 100)
    param_names = ['shapes', 'input_type']
    params = [
        [(i, i) for i in sizes] + [(i, 2 * i) for i in sizes],
        ['random_uniform', 'random_uniform_sparse', 'random_uniform_integer',
         'random_geometric', 'random_two_cost', 'machol_wien']
    ]

    def setup(self, shape, input_type):
        rng = np.random.default_rng(42)
        generators = {
            'random_uniform': random_uniform,
            'random_uniform_sparse': random_uniform_sparse,
            'random_uniform_integer': random_uniform_integer,
            'random_geometric': random_geometric,
            'random_two_cost': random_two_cost,
            'machol_wien': machol_wien,
        }
        self.biadjacency_matrix = generators[input_type](shape, rng)

    def time_evaluation(self, *args):
        min_weight_full_bipartite_matching(self.biadjacency_matrix)
| 3,047
| 34.44186
| 95
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_linalg_expm.py
|
"""benchmarks for the scipy.sparse.linalg._expm_multiply module"""
import math
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg
from scipy.sparse.linalg import expm as sp_expm
from scipy.sparse.linalg import expm_multiply
def random_sparse_csr(m, n, nnz_per_row, rng=None):
    """Random sparse (m, n) CSR matrix with ~nnz_per_row entries per row.

    Parameters
    ----------
    m, n : int
        Matrix shape.
    nnz_per_row : int
        Entries drawn per row (duplicate column indices merge, so the
        resulting nnz may be slightly smaller).
    rng : numpy.random.Generator, optional
        Source of randomness.  Defaults to the legacy global np.random
        state for backward compatibility; pass a Generator for
        reproducible draws, consistent with random_sparse_csc.
    """
    rows = np.arange(m).repeat(nnz_per_row)
    if rng is None:
        cols = np.random.randint(0, n, size=nnz_per_row*m)
        vals = np.random.random_sample(m*nnz_per_row)
    else:
        cols = rng.integers(0, n, size=nnz_per_row*m)
        vals = rng.random(m*nnz_per_row)
    M = scipy.sparse.coo_matrix((vals, (rows, cols)), (m, n), dtype=float)
    return M.tocsr()
def random_sparse_csc(m, n, nnz_per_row, rng):
    """Random sparse (m, n) matrix with ~nnz_per_row entries per row, CSC.

    CSC (rather than CSR) is returned because the sparse LU decomposition
    raises a warning when handed CSR input.
    """
    rows = np.arange(m).repeat(nnz_per_row)
    cols = rng.integers(0, n, size=nnz_per_row * m)
    vals = rng.random(m * nnz_per_row)
    coo = scipy.sparse.coo_matrix((vals, (rows, cols)), (m, n), dtype=float)
    return coo.tocsc()
class ExpmMultiply(Benchmark):
    """Benchmark expm_multiply applied to a single unit vector."""

    def setup(self):
        self.n = 2000
        self.i = 100
        self.j = 200
        self.A = random_sparse_csr(self.n, self.n, 25)

    def time_expm_multiply(self):
        # Apply expm(A) to the j-th unit vector, i.e. compute column j of
        # the matrix exponential without ever forming it.
        unit = np.zeros(self.n, dtype=float)
        unit[self.j] = 1
        col_j = expm_multiply(self.A, unit)
        col_j[self.i]
class Expm(Benchmark):
    """Compare sparse and dense matrix-exponential implementations."""

    params = [
        [30, 100, 300],
        ['sparse', 'dense']
    ]
    param_names = ['n', 'format']

    def setup(self, n, format):
        rng = np.random.default_rng(1234)
        # Scale per-row fill like log(n) so density drops with size.
        fill = int(math.ceil(math.log(n)))
        self.A_sparse = random_sparse_csc(n, n, fill, rng)
        self.A_dense = self.A_sparse.toarray()

    def time_expm(self, n, format):
        if format == 'sparse':
            sp_expm(self.A_sparse)
        elif format == 'dense':
            scipy.linalg.expm(self.A_dense)
| 2,246
| 29.364865
| 74
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/sparse_matrix_power.py
|
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse import random
class BenchMatrixPower(Benchmark):
    # Parameters: exponent, matrix order, and fill density.
    params = [
        [0, 1, 2, 3, 8, 9],
        [1000],
        [1e-6, 1e-3],
    ]
    param_names = ['x', 'N', 'density']
    def setup(self, x: int, N: int, density: float):
        # Random N x N CSR matrix with the requested density.
        self.A = random(N, N, density=density, format='csr')
    def time_matrix_power(self, x: int, N: int, density: float):
        # Matrix power via the ** operator (repeated sparse matmul).
        self.A ** x
| 465
| 22.3
| 64
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/blas_lapack.py
|
import numpy as np
from .common import Benchmark, safe_import
with safe_import():
import scipy.linalg.blas as bla
class GetBlasLapackFuncs(Benchmark):
    """
    Benchmark the lookup of the correct BLAS/LAPACK routine flavor.

    Unusual dtype/order combinations should not make the lookup diverge
    much, so the timings across parameters should stay comparable.
    """

    param_names = ['dtype1', 'dtype2',
                   'dtype1_ord', 'dtype2_ord',
                   'size']
    params = [
        ['b', 'G', 'd'],
        ['d', 'F', '?'],
        ['C', 'F'],
        ['C', 'F'],
        [10, 100, 1000]
    ]

    def setup(self, dtype1, dtype2, dtype1_ord, dtype2_ord, size):
        self.arr1 = np.empty(size, dtype=dtype1, order=dtype1_ord)
        self.arr2 = np.empty(size, dtype=dtype2, order=dtype2_ord)

    def time_find_best_blas_type(self, dtype1, dtype2, dtype1_ord,
                                 dtype2_ord, size):
        prefix, dtype, prefer_fortran = bla.find_best_blas_type(
            (self.arr1, self.arr2))
| 1,015
| 29.787879
| 87
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/interpolate.py
|
import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark, safe_import
with safe_import():
from scipy.stats import spearmanr
with safe_import():
import scipy.interpolate as interpolate
class Leaks(Benchmark):
    # asv reports this metric: ratio of peak memory across repeat counts.
    unit = "relative increase with repeats"
    def track_leaks(self):
        """Probe griddata for memory leaks.

        Runs the same griddata workload in a monitored subprocess with
        increasing repeat counts and records peak memory; a significant
        (p < 0.05) positive rank correlation between repeats and peak
        memory is reported as a probable leak.
        """
        set_mem_rlimit()
        # Setup temp file, make it fit in memory
        repeats = [2, 5, 10, 50, 200]
        peak_mems = []
        for repeat in repeats:
            code = """
import numpy as np
from scipy.interpolate import griddata
def func(x, y):
    return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range(%(repeat)d):
    for method in ['nearest', 'linear', 'cubic']:
        griddata(points, values, (grid_x, grid_y), method=method)
""" % dict(repeat=repeat)
            _, peak_mem = run_monitored(code)
            peak_mems.append(peak_mem)
        corr, p = spearmanr(repeats, peak_mems)
        if p < 0.05:
            print("*"*79)
            print("PROBABLE MEMORY LEAK")
            print("*"*79)
        else:
            print("PROBABLY NO MEMORY LEAK")
        return max(peak_mems) / min(peak_mems)
class BenchPPoly(Benchmark):
    """Benchmark evaluation of a piecewise polynomial (PPoly)."""

    def setup(self):
        rng = np.random.default_rng(1234)
        n_intervals, order = 55, 3
        breakpoints = np.sort(rng.random(n_intervals + 1))
        coeffs = rng.random((order, n_intervals))
        self.pp = interpolate.PPoly(coeffs, breakpoints)
        self.xp = np.linspace(0, 1, 100)

    def time_evaluation(self):
        self.pp(self.xp)
class GridData(Benchmark):
    """Benchmark griddata over grids of increasing resolution."""

    param_names = ['n_grids', 'method']
    params = [
        [10j, 100j, 1000j],
        ['nearest', 'linear', 'cubic']
    ]

    def setup(self, n_grids, method):
        def surface(x, y):
            return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
        self.func = surface
        self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
        self.points = np.random.rand(1000, 2)
        self.values = surface(self.points[:, 0], self.points[:, 1])

    def time_evaluation(self, n_grids, method):
        interpolate.griddata(self.points, self.values,
                             (self.grid_x, self.grid_y), method=method)
class Interpolate1d(Benchmark):
    """Benchmark interp1d construction and evaluation separately."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100, 1000, 10000],
        ['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'],
    ]

    def setup(self, n_samples, method):
        self.x = np.arange(n_samples)
        self.y = np.exp(-self.x/3.0)
        self.interpolator = interpolate.interp1d(self.x, self.y, kind=method)
        # Evaluation points, 4x oversampled over the data range.
        self.xp = np.linspace(self.x[0], self.x[-1], 4*n_samples)

    def time_interpolate(self, n_samples, method):
        """Time the construction overhead."""
        interpolate.interp1d(self.x, self.y, kind=method)

    def time_interpolate_eval(self, n_samples, method):
        """Time the evaluation."""
        self.interpolator(self.xp)
class Interpolate2d(Benchmark):
    """Benchmark interp2d construction on a regular grid."""

    param_names = ['n_samples', 'method']
    params = [
        [10, 50, 100],
        ['linear', 'cubic', 'quintic'],
    ]

    def setup(self, n_samples, method):
        half = n_samples / 2.
        self.x = np.arange(-half, half, 0.25)
        self.y = np.arange(-half, half, 0.25)
        grid_x, grid_y = np.meshgrid(self.x, self.y)
        self.xx = grid_x
        self.yy = grid_y
        self.z = np.sin(grid_x**2 + grid_y**2)

    def time_interpolate(self, n_samples, method):
        interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
    """Benchmark legacy Rbf construction in 1-D and 2-D."""

    param_names = ['n_samples', 'function']
    params = [
        [10, 50, 100],
        ['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
    ]

    def setup(self, n_samples, function):
        self.x = np.arange(n_samples)
        self.y = np.sin(self.x)
        half = n_samples / 2.
        self.X = np.arange(-half, half, 0.25)
        self.Y = np.arange(-half, half, 0.25)
        self.z = np.exp(-self.X**2 - self.Y**2)

    def time_rbf_1d(self, n_samples, function):
        interpolate.Rbf(self.x, self.y, function=function)

    def time_rbf_2d(self, n_samples, function):
        interpolate.Rbf(self.X, self.Y, self.z, function=function)
class RBFInterpolator(Benchmark):
    """Benchmark RBFInterpolator build + evaluation for each kernel."""

    param_names = ['neighbors', 'n_samples', 'kernel']
    params = [
        [None, 50],
        [10, 100, 1000],
        ['linear', 'thin_plate_spline', 'cubic', 'quintic', 'multiquadric',
         'inverse_multiquadric', 'inverse_quadratic', 'gaussian']
    ]

    def setup(self, neighbors, n_samples, kernel):
        rng = np.random.RandomState(0)
        self.y = rng.uniform(-1, 1, (n_samples, 2))
        self.x = rng.uniform(-1, 1, (n_samples, 2))
        self.d = np.sum(self.y, axis=1)*np.exp(-6*np.sum(self.y**2, axis=1))

    def time_rbf_interpolator(self, neighbors, n_samples, kernel):
        interp = interpolate.RBFInterpolator(
            self.y,
            self.d,
            neighbors=neighbors,
            epsilon=5.0,
            kernel=kernel
        )
        interp(self.x)
class UnivariateSpline(Benchmark):
    """Benchmark smoothing-spline construction on noisy data."""

    param_names = ['n_samples', 'degree']
    params = [
        [10, 50, 100],
        [3, 4, 5]
    ]

    def setup(self, n_samples, degree):
        half = n_samples / 2.
        self.x = np.arange(-half, half, 0.25)
        # Gaussian bump plus noise.
        self.y = np.exp(-self.x**2) + 0.1 * np.random.randn(*self.x.shape)

    def time_univariate_spline(self, n_samples, degree):
        interpolate.UnivariateSpline(self.x, self.y, k=degree)
class BivariateSpline(Benchmark):
    """
    Author: josef-pktd and scipy mailinglist example
    'http://scipy-user.10969.n7.nabble.com/BivariateSpline-examples\
    -and-my-crashing-python-td14801.html'
    """
    param_names = ['n_samples']
    params = [
        [10, 20, 30]
    ]

    def setup(self, n_samples):
        xs = np.arange(0, n_samples, 0.5)
        ys = np.arange(0, n_samples, 0.5)
        xs, ys = np.meshgrid(xs, ys)
        xs = xs.ravel()
        ys = ys.ravel()
        xmin = xs.min()-1
        xmax = xs.max()+1
        ymin = ys.min()-1
        ymax = ys.max()+1
        # Interior knots, inset by s from the padded bounding box.
        s = 1.1
        self.yknots = np.linspace(ymin+s, ymax-s, 10)
        self.xknots = np.linspace(xmin+s, xmax-s, 10)
        self.z = np.sin(xs) + 0.1*np.random.normal(size=xs.shape)
        self.x = xs
        self.y = ys

    def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)

    def time_lsq_bivariate_spline(self, n_samples):
        interpolate.LSQBivariateSpline(self.x, self.y, self.z,
                                       self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
    """
    Linear interpolation: scipy.interpolate.interp1d vs numpy.interp.
    """
    param_names = ['n_samples', 'module']
    params = [
        [10, 50, 100],
        ['numpy', 'scipy']
    ]

    def setup(self, n_samples, module):
        self.x = np.arange(n_samples)
        self.y = np.exp(-self.x/3.0)
        self.z = np.random.normal(size=self.x.shape)

    def time_interpolate(self, n_samples, module):
        if module == 'scipy':
            # Construction only; evaluation happens lazily in interp1d.
            interpolate.interp1d(self.x, self.y, kind="linear")
        else:
            np.interp(self.z, self.x, self.y)
class RegularGridInterpolator(Benchmark):
    """
    Benchmark RegularGridInterpolator with method="linear".
    """
    param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']
    params = [
        [2, 3, 4],
        [10, 40, 200],
        [10, 100, 1000, 10000],
        [1, -1]
    ]

    def setup(self, ndim, max_coord_size, n_samples, flipped):
        rng = np.random.default_rng(314159)
        # Coordinate arrays halve in size from one dimension to the next;
        # flipped == -1 exercises the descending-coordinates code path.
        sizes = [max_coord_size // 2**i for i in range(ndim)]
        self.points = [np.sort(rng.random(size=s))[::flipped]
                       for s in sizes]
        self.values = rng.random(size=sizes)
        # Query points drawn inside the grid's bounding box.
        boxes = [(p.min(), p.max()) for p in self.points]
        samples = [rng.uniform(lo, hi, size=n_samples)
                   for lo, hi in boxes]
        self.xi = np.array(samples).T
        self.interp = interpolate.RegularGridInterpolator(
            self.points,
            self.values,
        )

    def time_rgi_setup_interpolator(self, ndim, max_coord_size,
                                    n_samples, flipped):
        self.interp = interpolate.RegularGridInterpolator(
            self.points,
            self.values,
        )

    def time_rgi(self, ndim, max_coord_size, n_samples, flipped):
        self.interp(self.xi)
class RegularGridInterpolatorValues(interpolate.RegularGridInterpolator):
    # Variant whose query points ``xi`` are fixed at construction: the
    # per-query preprocessing (point validation, grid-cell index search)
    # runs once here, and ``__call__`` then takes a *values* array instead
    # of points.
    # NOTE(review): overrides private parent hooks (_prepare_xi,
    # _find_indices, _check_* helpers) -- verify they still exist with
    # these signatures in the scipy version in use.
    def __init__(self, points, xi, **kwargs):
        # create fake values for initialization
        values = np.zeros(tuple([len(pt) for pt in points]))
        super().__init__(points, values, **kwargs)
        # While False, the hooks below fall through to the parent so the
        # real precomputation can run.
        self._is_initialized = False
        # precompute values
        (self.xi, self.xi_shape, self.ndim,
         self.nans, self.out_of_bounds) = self._prepare_xi(xi)
        self.indices, self.norm_distances = self._find_indices(xi.T)
        self._is_initialized = True
    def _prepare_xi(self, xi):
        if not self._is_initialized:
            return super()._prepare_xi(xi)
        else:
            # just give back precomputed values
            return (self.xi, self.xi_shape, self.ndim,
                    self.nans, self.out_of_bounds)
    def _find_indices(self, xi):
        if not self._is_initialized:
            return super()._find_indices(xi)
        else:
            # just give back pre-computed values
            return self.indices, self.norm_distances
    def __call__(self, values, method=None):
        # Validate the new values against the stored grid, then evaluate
        # at the precomputed points.
        values = self._check_values(values)
        # check fillvalue
        self._check_fill_value(values, self.fill_value)
        # check dimensionality
        self._check_dimensionality(self.grid, values)
        # flip, if needed
        self.values = np.flip(values, axis=self._descending_dimensions)
        return super().__call__(self.xi, method=method)
class RegularGridInterpolatorSubclass(Benchmark):
    """
    Benchmark the fixed-points RegularGridInterpolatorValues subclass
    with method="linear".
    """
    param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']
    params = [
        [2, 3, 4],
        [10, 40, 200],
        [10, 100, 1000, 10000],
        [1, -1]
    ]

    def setup(self, ndim, max_coord_size, n_samples, flipped):
        rng = np.random.default_rng(314159)
        # Coordinate arrays halve in size from one dimension to the next.
        sizes = [max_coord_size // 2**i for i in range(ndim)]
        self.points = [np.sort(rng.random(size=s))[::flipped]
                       for s in sizes]
        self.values = rng.random(size=sizes)
        # Query points drawn inside the grid's bounding box.
        boxes = [(p.min(), p.max()) for p in self.points]
        samples = [rng.uniform(lo, hi, size=n_samples)
                   for lo, hi in boxes]
        self.xi = np.array(samples).T
        self.interp = RegularGridInterpolatorValues(
            self.points,
            self.xi,
        )

    def time_rgi_setup_interpolator(self, ndim, max_coord_size,
                                    n_samples, flipped):
        self.interp = RegularGridInterpolatorValues(
            self.points,
            self.xi,
        )

    def time_rgi(self, ndim, max_coord_size, n_samples, flipped):
        self.interp(self.values)
class CloughTocherInterpolatorValues(interpolate.CloughTocher2DInterpolator):
    # Variant of CloughTocher2DInterpolator whose query points ``xi`` are
    # fixed at construction: simplex lookup runs once, and ``__call__``
    # then takes a new *values* array instead of points.
    def __init__(self, points, xi, tol=1e-6, maxiter=400, **kwargs):
        interpolate.CloughTocher2DInterpolator.__init__(self, points, None, tol=tol, maxiter=maxiter)
        self.xi = None
        self._preprocess_xi(*xi)
        # NOTE(review): `_find_simplicies` (sic) is a private parent hook;
        # confirm it exists under this spelling in the scipy version used.
        self.simplices, self.c = interpolate.CloughTocher2DInterpolator._find_simplicies(self, self.xi)
    def _preprocess_xi(self, *args):
        # First call performs the real preprocessing; later calls return
        # the cached result.
        if self.xi is None:
            self.xi, self.interpolation_points_shape = interpolate.CloughTocher2DInterpolator._preprocess_xi(self, *args)
        return self.xi, self.interpolation_points_shape
    def _find_simplicies(self, xi):
        # Return the simplex data precomputed in __init__.
        return self.simplices, self.c
    def __call__(self, values):
        """Evaluate the interpolant at the stored points for new values."""
        self._set_values(values)
        return super().__call__(self.xi)
class CloughTocherInterpolatorSubclass(Benchmark):
    """
    Benchmark CloughTocherInterpolatorValues with method="linear".
    """
    param_names = ['max_coord_size', 'n_samples', 'flipped', 'tol', 'max_iter']
    params = [
        [10, 40, 200],
        [10, 100, 1000, 10000],
        [1, -1],
        [1e-3, 1e-6, 1e-8],
        [1, 10, 100, 400]
    ]

    def setup(self, max_coord_size, n_samples, flipped, tol, max_iter):
        rng = np.random.default_rng(314159)
        # Two coordinate arrays, the second half the size of the first.
        sizes = [max_coord_size // 2**i for i in range(2)]
        self.points = [np.sort(rng.random(size=s))[::flipped]
                       for s in sizes]
        # Ten independent values arrays to evaluate against.
        self.values = [rng.random(size=sizes) for _ in range(10)]
        # Query points drawn inside the grid's bounding box.
        boxes = [(p.min(), p.max()) for p in self.points]
        samples = [rng.uniform(lo, hi, size=n_samples)
                   for lo, hi in boxes]
        self.xi = np.array(samples).T
        self.tol = tol
        self.max_iter = max_iter
        self.interp = CloughTocherInterpolatorValues(
            self.points,
            self.xi,
            self.tol,
            self.max_iter
        )

    def time_clough_tocher_setup_interpolator(self, max_coord_size,
                                              n_samples, flipped, tol,
                                              max_iter):
        self.interp = CloughTocherInterpolatorValues(
            self.points,
            self.xi,
            self.tol,
            self.max_iter
        )

    def time_clough_tocher(self, max_coord_size, n_samples, flipped, tol,
                           max_iter):
        for vals in self.values:
            self.interp(vals)
| 14,152
| 31.092971
| 121
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/linprog_benchmark_files/__init__.py
|
# -*- coding: utf-8 -*-
"""
==============================================================================
``linprog_benchmark_files`` -- Problems for testing linear programming routines
==============================================================================
This module provides a comprehensive set of problems for benchmarking linear
programming routines, that is, scipy.optimize.linprog with method =
'interior-point' or 'simplex'.
"""
"""
All problems are from the Netlib LP Test Problem Set, courtesy of CUTEr
ftp://ftp.numerical.rl.ac.uk/pub/cutest/netlib/netlib.html
Converted from SIF (MPS) format by Matt Haberland
"""
__all__ = [s for s in dir() if not s.startswith('_')]
| 671
| 31
| 78
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/tests/test_go_benchmark_functions.py
|
"""
Unit tests for the global optimization benchmark functions
"""
import numpy as np
from .. import go_benchmark_functions as gbf
import inspect
class TestGoBenchmarkFunctions:
    def setup_method(self):
        members = inspect.getmembers(gbf, inspect.isclass)
        self.benchmark_functions = {name: cls for name, cls in members
                                    if issubclass(cls, gbf.Benchmark)}

    def teardown_method(self):
        pass

    def test_optimum_solution(self):
        # Evaluating each function at its stated global optimum should
        # reproduce the stated minimum value.
        for name, klass in self.benchmark_functions.items():
            # LennardJones has many global optima sharing the same minimum
            # energy; the base class and 'Problem*' classes are skipped.
            if (name in ['Benchmark', 'LennardJones'] or
                    name.startswith('Problem')):
                continue
            f = klass()
            if name in ['Damavandi', 'Csendes']:
                # These two evaluate to nan at the optimum.
                with np.errstate(divide='ignore', invalid='ignore'):
                    print(name, f.fun(np.asarray(f.global_optimum[0])),
                          f.fglob)
                    assert np.isnan(f.fun(np.asarray(f.global_optimum[0])))
                continue
            print(name, f.fun(np.asarray(f.global_optimum[0])), f.fglob)
            assert f.success(f.global_optimum[0])

    def test_solution_exists(self):
        # Every benchmark must expose a global-minimum value; missing
        # fglob surfaces here as an AttributeError.
        for name, klass in self.benchmark_functions.items():
            if name == 'Benchmark':
                continue
            instance = klass()
            instance.fglob

    def test_bounds_access_subscriptable(self):
        # bounds must be subscriptable; a bare zip object (Python 3)
        # would fail this.
        for name, klass in self.benchmark_functions.items():
            if name == 'Benchmark' or name.startswith('Problem'):
                continue
            klass().bounds[0]

    def test_redimension(self):
        # Problems must be re-dimensionable; LennardJones supports it.
        LJ = self.benchmark_functions['LennardJones']
        problem = LJ()
        problem.change_dimensions(10)
        # Initial vector and bounds must track the new dimensionality.
        assert len(problem.initial_vector()) == 10
        assert len(problem.bounds) == 10
        assert problem.N == 10
| 2,650
| 33.428571
| 77
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import (abs, asarray, cos, exp, floor, pi, sign, sin, sqrt, sum,
size, tril, isnan, atleast_2d, repeat)
from numpy.testing import assert_almost_equal
from .go_benchmark import Benchmark
class CarromTable(Benchmark):
    r"""
    CarromTable objective function.

    The CarromTable [1]_ global optimization problem is a multimodal
    minimization problem defined as follows:

    .. math::

        f_{\text{CarromTable}}(x) = - \frac{1}{30}\left(\cos(x_1)
        cos(x_2) e^{\left|1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right)^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -24.15681551650653` for :math:`x_i = \pm
    9.646157266348881` for :math:`i = 1, 2`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        # Four symmetric optima (one per quadrant).
        self.global_optimum = [(9.646157266348881, 9.646134286497169),
                               (-9.646157266348881, 9.646134286497169),
                               (9.646157266348881, -9.646134286497169),
                               (-9.646157266348881, -9.646134286497169)]
        self.fglob = -24.15681551650653

    def fun(self, x, *args):
        self.nfev += 1
        cos_prod = cos(x[0]) * cos(x[1])
        radius = sqrt(x[0] ** 2 + x[1] ** 2)
        term = cos_prod * exp(abs(1 - radius / pi))
        return -(term ** 2) / 30.
class Chichinadze(Benchmark):
    r"""
    Chichinadze objective function.

    This class defines the Chichinadze [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Chichinadze}}(x) = x_{1}^{2} - 12 x_{1}
        + 8 \sin\left(\frac{5}{2} \pi x_{1}\right)
        + 10 \cos\left(\frac{1}{2} \pi x_{1}\right) + 11
        - 0.2 \frac{\sqrt{5}}{e^{\frac{1}{2} \left(x_{2} -0.5 \right)^{2}}}

    with :math:`x_i \in [-30, 30]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -42.94438701899098` for :math:`x =
    [6.189866586965680, 0.5]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: Jamil#33 has a dividing factor of 2 in the sin term. However, f(x)
    for the given solution does not give the global minimum. i.e. the equation
    is at odds with the solution.
    Only by removing the dividing factor of 2, i.e. `8 * sin(5 * pi * x[0])`
    does the given solution result in the given global minimum.
    Do we keep the result or equation?
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-30.0] * self.N, [30.0] * self.N))
        self.custom_bounds = [(-10, 10), (-10, 10)]
        self.global_optimum = [[6.189866586965680, 0.5]]
        self.fglob = -42.94438701899098

    def fun(self, x, *args):
        self.nfev += 1
        # Accumulate in the same left-to-right order as the formula so the
        # floating-point result is unchanged.
        total = x[0] ** 2 - 12 * x[0] + 11
        total = total + 10 * cos(pi * x[0] / 2)
        total = total + 8 * sin(5 * pi * x[0] / 2)
        return total - 1.0 / sqrt(5) * exp(-((x[1] - 0.5) ** 2) / 2)
class Cigar(Benchmark):
    r"""
    Cigar objective function.

    This class defines the Cigar [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Cigar}}(x) = x_1^2 + 10^6\sum_{i=2}^{n} x_i^2

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-5, 5), (-5, 5)]
        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        tail = sum(x[1:] ** 2)
        return x[0] ** 2 + 1e6 * tail
class Cola(Benchmark):
    r"""
    Cola objective function.
    The 17-dimensional function computes indirectly the formula
    :math:`f(n, u)` by setting :math:`x_0 = y_0, x_1 = u_0,
    x_i = u_{2(i2)}, y_i = u_{2(i2)+1}`:
    .. math::
        f_{\text{Cola}}(x) = \sum_{i<j}^{n} \left (r_{i,j} - d_{i,j} \right )^2
    Where :math:`r_{i, j}` is given by:
    .. math::
        r_{i, j} = \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
    And :math:`d` is a symmetric matrix of target distances.
    This function has bounds :math:`x_0 \in [0, 4]` and :math:`x_i \in [-4, 4]`
    for :math:`i = 1, ..., n-1`.
    *Global optimum* 11.7464.
    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """
    def __init__(self, dimensions=17):
        Benchmark.__init__(self, dimensions)
        # x_0 is restricted to [0, 4]; all remaining variables to [-4, 4].
        self._bounds = [[0.0, 4.0]] + list(zip([-4.0] * (self.N - 1),
                                               [4.0] * (self.N - 1)))
        self.global_optimum = [[0.651906, 1.30194, 0.099242, -0.883791,
                                -0.8796, 0.204651, -3.28414, 0.851188,
                                -3.46245, 2.53245, -0.895246, 1.40992,
                                -3.07367, 1.96257, -2.97872, -0.807849,
                                -1.68978]]
        self.fglob = 11.7464
        # Lower-triangular matrix of target pairwise distances d_{ij}.
        self.d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                          [1.27, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                          [1.69, 1.43, 0, 0, 0, 0, 0, 0, 0, 0],
                          [2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0, 0],
                          [3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0, 0],
                          [3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0, 0],
                          [2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0, 0],
                          [3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0, 0],
                          [3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0, 0],
                          [2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97, 0.]])
    def fun(self, x, *args):
        self.nfev += 1
        # Reassemble point coordinates: the first point is pinned at (0, 0)
        # and the second at (x[0], 0); the remaining x/y pairs alternate.
        xi = atleast_2d(asarray([0.0, x[0]] + list(x[1::2])))
        xj = repeat(xi, size(xi, 1), axis=0)
        xi = xi.T
        yi = atleast_2d(asarray([0.0, 0.0] + list(x[2::2])))
        yj = repeat(yi, size(yi, 1), axis=0)
        yi = yi.T
        # Squared mismatch between pairwise distances and the targets d.
        inner = (sqrt(((xi - xj) ** 2 + (yi - yj) ** 2)) - self.d) ** 2
        # Count each unordered pair once (strict lower triangle only).
        inner = tril(inner, -1)
        return sum(sum(inner, axis=1))
class Colville(Benchmark):
    r"""
    Colville objective function (4-D).

    The implemented form is:

    .. math::

        f_{\text{Colville}}(x) = 100 (x_1 - x_2^2)^2 + (1 - x_1)^2
        + (1 - x_3)^2 + 90 (x_4 - x_3^2)^2
        + 10.1 \left[(x_2 - 1)^2 + (x_4 - 1)^2\right]
        + 19.8 (x_2 - 1)(x_4 - 1)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
    :math:`i = 1, ..., 4`

    NOTE(review): the equation printed in Jamil & Yang #36 differs from this
    implementation — confirm against the original Colville reference.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[1.0] * self.N]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        x1, x2, x3, x4 = x[0], x[1], x[2], x[3]
        # Two Rosenbrock-like "banana" valleys plus quadratic coupling terms.
        total = 100 * (x1 - x2 ** 2) ** 2
        total = total + (1 - x1) ** 2 + (1 - x3) ** 2
        total = total + 90 * (x4 - x3 ** 2) ** 2
        total = total + 10.1 * ((x2 - 1) ** 2 + (x4 - 1) ** 2)
        total = total + 19.8 * (x2 - 1) * (x4 - 1)
        return total
class Corana(Benchmark):
    r"""
    Corana objective function (4-D).

    A piecewise-quadratic multimodal problem [1]_:

    .. math::

        f_{\text{Corana}}(x) = \begin{cases} \sum_{i=1}^n 0.15 d_i
        [z_i - 0.05\textrm{sgn}(z_i)]^2 & \textrm{if } |x_i - z_i| < 0.05 \\
        d_i x_i^2 & \textrm{otherwise} \end{cases}

    where :math:`z_i = 0.2 \lfloor |x_i/0.2| + 0.49999 \rfloor
    \textrm{sgn}(x_i)` and :math:`d = (1, 1000, 10, 100)`,
    with :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., 4`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 5.0)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        weights = (1.0, 1000.0, 10.0, 100.0)
        total = 0.0
        for j, dj in enumerate(weights):
            # z_j snaps x_j onto a 0.2-spaced grid.
            zj = floor(abs(x[j] / 0.2) + 0.49999) * sign(x[j]) * 0.2
            if abs(x[j] - zj) < 0.05:
                # Inside the flat "pocket" around the grid point.
                total += 0.15 * ((zj - 0.05 * sign(zj)) ** 2) * dj
            else:
                total += dj * x[j] * x[j]
        return total
class CosineMixture(Benchmark):
    r"""
    Cosine Mixture objective function.

    A multimodal separable problem:

    .. math::

        f_{\text{CosineMixture}}(x) = -0.1 \sum_{i=1}^n \cos(5 \pi x_i)
        - \sum_{i=1}^n x_i^2

    with :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., N`.

    The implementation takes the optimum at :math:`x_i = -1` with
    :math:`f = -0.9 N`; Jamil & Yang #38 quote :math:`f = -0.1 N` at
    :math:`x_i = 0`, which is not the minimum of this formula (the
    :math:`-x_i^2` term keeps decreasing toward the boundary).

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self.change_dimensionality = True
        self._bounds = [(-1.0, 1.0)] * self.N
        self.global_optimum = [[-1.0] * self.N]
        self.fglob = -0.9 * self.N

    def fun(self, x, *args):
        self.nfev += 1
        oscillation = sum(cos(5.0 * pi * x))
        return -0.1 * oscillation - sum(x ** 2.0)
class CrossInTray(Benchmark):
    r"""
    Cross-in-Tray objective function.

    A 2-D multimodal problem [1]_:

    .. math::

        f_{\text{CrossInTray}}(x) = -0.0001 \left(\left|
        \sin(x_1) \sin(x_2)
        e^{\left|100 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right|
        + 1\right)^{0.1}

    This implementation searches :math:`x_i \in [-10, 10]` (some references
    quote :math:`[-15, 15]`; all four minima lie inside either box).

    *Global optimum*: :math:`f(x) = -2.062611870822739` at the four
    symmetric points :math:`x_i = \pm 1.3494066...` for :math:`i = 1, 2`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        # Four symmetric global minimisers.
        self.global_optimum = [(1.349406685353340, 1.349406608602084),
                               (-1.349406685353340, 1.349406608602084),
                               (1.349406685353340, -1.349406608602084),
                               (-1.349406685353340, -1.349406608602084)]
        self.fglob = -2.062611870822739

    def fun(self, x, *args):
        self.nfev += 1
        envelope = exp(abs(100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi))
        core = abs(sin(x[0]) * sin(x[1]) * envelope) + 1
        return -0.0001 * core ** (0.1)
class CrossLegTable(Benchmark):
    r"""
    Cross-Leg-Table objective function.

    A 2-D multimodal problem [1]_:

    .. math::

        f_{\text{CrossLegTable}}(x) = - \left(\left|
        \sin(x_1) \sin(x_2)
        e^{\left|100 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right|
        + 1\right)^{-0.1}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1`, attained anywhere on the planes
    :math:`x_1 = 0` or :math:`x_2 = 0`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich University, 2006
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.0, 0.0]]
        self.fglob = -1.0

    def fun(self, x, *args):
        self.nfev += 1
        envelope = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi
        waves = sin(x[0]) * sin(x[1])
        return -(abs(waves * exp(abs(envelope))) + 1) ** (-0.1)
class CrownedCross(Benchmark):
    r"""
    Crowned Cross objective function.

    The mirror image of Cross-Leg-Table [1]_:

    .. math::

        f_{\text{CrownedCross}}(x) = 0.0001 \left(\left|
        \sin(x_1) \sin(x_2)
        e^{\left|100 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right|
        + 1\right)^{0.1}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.0001`, attained anywhere on the
    planes :math:`x_1 = 0` or :math:`x_2 = 0`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich University, 2006
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0, 0]]
        self.fglob = 0.0001

    def fun(self, x, *args):
        self.nfev += 1
        envelope = 100 - sqrt(x[0] ** 2 + x[1] ** 2) / pi
        waves = sin(x[0]) * sin(x[1])
        return 0.0001 * (abs(waves * exp(abs(envelope))) + 1) ** (0.1)
class Csendes(Benchmark):
    r"""
    Csendes objective function.

    This class defines the Csendes [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Csendes}}(x) = \sum_{i=1}^n x_i^6 \left[ 2 + \sin
        \left( \frac{1}{x_i} \right ) \right]

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [-1, 1]` for :math:`i = 1, ..., N`.

    *Global optimum*: :math:`f(x) = 0.0` for :math:`x_i = 0` for
    :math:`i = 1, ..., N`.  The function is undefined at the optimum itself
    (1/x_i), hence the special handling below.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self.change_dimensionality = True
        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
        self.global_optimum = [[0 for _ in range(self.N)]]
        # NaN because f is indeterminate at the minimiser (division by zero);
        # `success` below treats NaN as a hit.
        self.fglob = np.nan

    def fun(self, x, *args):
        self.nfev += 1
        try:
            return sum((x ** 6.0) * (2.0 + sin(1.0 / x)))
        except ZeroDivisionError:
            # x contains an exact zero and the backend raises rather than
            # returning inf/nan.
            return np.nan
        except FloatingPointError:
            # Same situation when numpy error state is set to 'raise'.
            return np.nan

    def success(self, x):
        """Is a candidate solution at the global minimum"""
        val = self.fun(asarray(x))
        if isnan(val):
            # Evaluation blew up at x=0, which is exactly the optimum.
            return True
        try:
            assert_almost_equal(val, 0., 4)
            return True
        except AssertionError:
            return False
        # NOTE(review): unreachable — both branches above return.
        return False
class Cube(Benchmark):
    r"""
    Cube objective function.

    A 2-D valley-shaped problem:

    .. math::

        f_{\text{Cube}}(x) = 100 (x_2 - x_1^3)^2 + (1 - x_1)^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 1]`
    (Jamil & Yang #41 print a different solution, which is wrong).

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = ([0, 2], [0, 2])
        self.global_optimum = [[1.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        valley = x[1] - x[0] ** 3.0
        return 100.0 * valley ** 2.0 + (1.0 - x[0]) ** 2.0
| 18,458
| 30.880829
| 108
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py
|
# -*- coding: utf-8 -*-
from numpy import (abs, asarray, cos, exp, log, arange, pi, prod, sin, sqrt,
sum, tan)
from .go_benchmark import Benchmark, safe_import
with safe_import():
from scipy.special import factorial
class Matyas(Benchmark):
    r"""
    Matyas objective function.

    A 2-D quadratic problem [1]_:

    .. math::

        f_{\text{Matyas}}(x) = 0.26 (x_1^2 + x_2^2) - 0.48 x_1 x_2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        radial = 0.26 * (x[0] ** 2 + x[1] ** 2)
        coupling = 0.48 * x[0] * x[1]
        return radial - coupling
class McCormick(Benchmark):
    r"""
    McCormick objective function.

    The implemented 2-D form is:

    .. math::

        f_{\text{McCormick}}(x) = \sin(x_1 + x_2) + (x_1 - x_2)^2
        - 1.5 x_1 + 2.5 x_2 + 1

    with :math:`x_1 \in [-1.5, 4]`, :math:`x_2 \in [-3, 3]`.

    *Global optimum*: :math:`f(x) = -1.913222954981037` for
    :math:`x = [-0.5471975602214493, -1.547197559268372]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-1.5, 4.0), (-3.0, 3.0)]
        self.global_optimum = [[-0.5471975602214493, -1.547197559268372]]
        self.fglob = -1.913222954981037

    def fun(self, x, *args):
        self.nfev += 1
        wave = sin(x[0] + x[1])
        bowl = (x[0] - x[1]) ** 2
        return wave + bowl - 1.5 * x[0] + 2.5 * x[1] + 1
class Meyer(Benchmark):
    r"""
    Meyer [1]_ objective function.

    NIST nonlinear-regression standard (MGH10): least-squares fit of
    ``y = b1 * exp(b2 / (x + b3))`` to 16 observations.

    ..[1] https://www.itl.nist.gov/div898/strd/nls/data/mgh10.shtml

    TODO NIST regression standard
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0., 100., 100.],
                                [1, 1000., 500.]))
        # Certified NIST parameter values and residual sum of squares.
        self.global_optimum = [[5.6096364710e-3, 6.1813463463e3,
                                3.4522363462e2]]
        self.fglob = 8.7945855171e1
        # Observed responses y_i from the MGH10 dataset.
        self.a = asarray([3.478E+04, 2.861E+04, 2.365E+04, 1.963E+04, 1.637E+04,
                          1.372E+04, 1.154E+04, 9.744E+03, 8.261E+03, 7.030E+03,
                          6.005E+03, 5.147E+03, 4.427E+03, 3.820E+03, 3.307E+03,
                          2.872E+03])
        # Predictor values x_i from the MGH10 dataset.
        self.b = asarray([5.000E+01, 5.500E+01, 6.000E+01, 6.500E+01, 7.000E+01,
                          7.500E+01, 8.000E+01, 8.500E+01, 9.000E+01, 9.500E+01,
                          1.000E+02, 1.050E+02, 1.100E+02, 1.150E+02, 1.200E+02,
                          1.250E+02])

    def fun(self, x, *args):
        self.nfev += 1

        # Model prediction for every observation, then residual sum of squares.
        vec = x[0] * exp(x[1] / (self.b + x[2]))
        return sum((self.a - vec) ** 2)
class Michalewicz(Benchmark):
    r"""
    Michalewicz objective function.

    A 2-D problem with steep ridges and valleys [1]_:

    .. math::

        f_{\text{Michalewicz}}(x) = - \sum_{i=1}^{2} \sin\left(x_i\right)
        \sin^{2 m}\left(\frac{i x_i^{2}}{\pi}\right)

    with :math:`m = 10` and :math:`x_i \in [0, \pi]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1.8013` at
    :math:`x = [2.20290555, 1.570796]`.

    .. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
        Unconstrained Global Optimization", 2005

    NOTE: dimensionality could be made variable, but the global minimum
    would change with it.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, pi)] * self.N
        self.global_optimum = [[2.20290555, 1.570796]]
        self.fglob = -1.8013

    def fun(self, x, *args):
        self.nfev += 1
        steepness = 10.0
        indices = arange(1, self.N + 1)
        ridges = sin(indices * x ** 2 / pi) ** (2 * steepness)
        return -sum(sin(x) * ridges)
class MieleCantrell(Benchmark):
    r"""
    Miele-Cantrell [1]_ objective function.

    A 4-D multimodal problem:

    .. math::

        f_{\text{MieleCantrell}}(x) = (e^{-x_1} - x_2)^4 + 100 (x_2 - x_3)^6
        + \tan^4(x_3 - x_4) + x_1^8

    with :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 1, 1, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-1.0, 1.0)] * self.N
        self.global_optimum = [[0.0, 1.0, 1.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        term_exp = (exp(-x[0]) - x[1]) ** 4
        term_poly = 100 * (x[1] - x[2]) ** 6
        term_tan = tan(x[2] - x[3]) ** 4
        return term_exp + term_poly + term_tan + x[0] ** 8
class Mishra01(Benchmark):
    r"""
    Mishra 1 objective function.

    A multimodal problem [1]_:

    .. math::

        f_{\text{Mishra01}}(x) = (1 + x_n)^{x_n}, \qquad
        x_n = n - \sum_{i=1}^{n-1} x_i

    with :math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Upper bound nudged past 1 so the optimum is interior to the box.
        self._bounds = list(zip([0.0] * self.N,
                                [1.0 + 1e-9] * self.N))
        self.global_optimum = [[1.0] * self.N]
        self.fglob = 2.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # The last coordinate is implied by the first n-1.
        implied = self.N - sum(x[:-1])
        return (1 + implied) ** implied
class Mishra02(Benchmark):
    r"""
    Mishra 2 objective function.

    A multimodal problem [1]_, identical to Mishra 1 except that the implied
    coordinate is built from averages of adjacent pairs:

    .. math::

        f_{\text{Mishra02}}(x) = (1 + x_n)^{x_n}, \qquad
        x_n = n - \sum_{i=1}^{n-1} \frac{x_i + x_{i+1}}{2}

    with :math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1`
    for :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Upper bound nudged past 1 so the optimum is interior to the box.
        self._bounds = list(zip([0.0] * self.N,
                                [1.0 + 1e-9] * self.N))
        self.global_optimum = [[1.0] * self.N]
        self.fglob = 2.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        pair_means = (x[:-1] + x[1:]) / 2.0
        implied = self.N - sum(pair_means)
        return (1 + implied) ** implied
class Mishra03(Benchmark):
    r"""
    Mishra 3 objective function.

    A 2-D multimodal problem [1]_:

    .. math::

        f_{\text{Mishra03}}(x) = \sqrt{\lvert \cos{\sqrt{\lvert x_1^2
        + x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.19990562` for
    :math:`x = [-9.99378322, -9.99918927]` (Jamil & Yang #76 quote a
    different, larger value).

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[-9.99378322, -9.99918927]]
        self.fglob = -0.19990562

    def fun(self, x, *args):
        self.nfev += 1
        linear = 0.01 * (x[0] + x[1])
        radial = sqrt(abs(x[0] ** 2 + x[1] ** 2))
        return linear + sqrt(abs(cos(radial)))
class Mishra04(Benchmark):
    r"""
    Mishra 4 objective function.

    A 2-D multimodal problem [1]_, the sine analogue of Mishra 3:

    .. math::

        f_{\text{Mishra04}}(x) = \sqrt{\lvert \sin{\sqrt{\lvert
        x_1^2 + x_2^2 \rvert}} \rvert} + 0.01(x_1 + x_2)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.177715264826` for
    :math:`x = [-8.88055269734, -8.89097599857]` (Jamil & Yang #77 quote a
    minimum that is not attainable).

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[-8.88055269734, -8.89097599857]]
        self.fglob = -0.177715264826

    def fun(self, x, *args):
        self.nfev += 1
        linear = 0.01 * (x[0] + x[1])
        radial = sqrt(abs(x[0] ** 2 + x[1] ** 2))
        return linear + sqrt(abs(sin(radial)))
class Mishra05(Benchmark):
    r"""
    Mishra 5 objective function.

    The implemented 2-D form is:

    .. math::

        f_{\text{Mishra05}}(x) = \left [ \sin^2 ((\cos(x_1) + \cos(x_2))^2)
        + \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2
        + 0.01 x_1 + 0.1 x_2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1.019829519930646` for
    :math:`x = [-1.98682, -10]`.

    NOTE(review): the paper (line 381) uses coefficient 0.01 for both linear
    terms and quotes -0.119829; this implementation uses 0.1 for x_2 —
    confirm against the reference before changing either.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich Personal RePEc Archive, 2006, 1005
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[-1.98682, -10.0]]
        self.fglob = -1.019829519930646

    def fun(self, x, *args):
        self.nfev += 1
        trig = (sin((cos(x[0]) + cos(x[1])) ** 2) ** 2
                + cos((sin(x[0]) + sin(x[1])) ** 2) ** 2 + x[0])
        return 0.01 * x[0] + 0.1 * x[1] + trig ** 2
class Mishra06(Benchmark):
    r"""
    Mishra 6 objective function.

    The implemented 2-D form is:

    .. math::

        f_{\text{Mishra06}}(x) = -\log{\left [ \sin^2 ((\cos(x_1)
        + \cos(x_2))^2) - \cos^2 ((\sin(x_1) + \sin(x_2))^2) + x_1 \right ]^2}
        + 0.1 \left[(x_1 - 1)^2 + (x_2 - 1)^2 \right]

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -2.28395` for :math:`x = [2.88631, 1.82326]`

    NOTE(review): the paper (line 397) prints coefficient 0.01 on the
    quadratic penalty; this implementation uses 0.1 — confirm.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich Personal RePEc Archive, 2006, 1005
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[2.88631, 1.82326]]
        self.fglob = -2.28395

    def fun(self, x, *args):
        self.nfev += 1
        penalty = 0.1 * ((x[0] - 1) ** 2 + (x[1] - 1) ** 2)
        u = (cos(x[0]) + cos(x[1])) ** 2
        v = (sin(x[0]) + sin(x[1])) ** 2
        return penalty - log((sin(u) ** 2 - cos(v) ** 2 + x[0]) ** 2)
class Mishra07(Benchmark):
    r"""
    Mishra 7 objective function.

    A multimodal problem [1]_:

    .. math::

        f_{\text{Mishra07}}(x) = \left [\prod_{i=1}^{n} x_i - n! \right]^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = \sqrt{n}`
    for :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = [(-2, 2), (-2, 2)]
        self.global_optimum = [[sqrt(self.N)] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        deviation = prod(x) - factorial(self.N)
        return deviation ** 2.0
class Mishra08(Benchmark):
    r"""
    Mishra 8 objective function.

    This class defines the Mishra 8 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra08}}(x) = 0.001 \left[\lvert x_1^{10} - 20x_1^9
        + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3
        + 11520x_1^2 - 5120x_1 + 2624 \rvert \lvert x_2^4 + 12x_2^3 + 54x_2^2
        + 108x_2 + 81 \rvert \right]^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [2, -3]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich Personal RePEc Archive, 2006, 1005

    TODO Line 1065

    NOTE(review): the docstring formula multiplies the two polynomial factors,
    but the implementation below *adds* them before squaring — confirm against
    the reference before relying on either form.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(1.0, 2.0), (-4.0, 1.0)]
        self.global_optimum = [[2.0, -3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Degree-10 polynomial in x[0]; vanishes at x[0] = 2.
        val = abs(x[0] ** 10 - 20 * x[0] ** 9 + 180 * x[0] ** 8
                  - 960 * x[0] ** 7 + 3360 * x[0] ** 6 - 8064 * x[0] ** 5
                  + 13340 * x[0] ** 4 - 15360 * x[0] ** 3 + 11520 * x[0] ** 2
                  - 5120 * x[0] + 2624)
        # Quartic in x[1], i.e. (x[1] + 3)^4; vanishes at x[1] = -3.
        val += abs(x[1] ** 4 + 12 * x[1] ** 3 +
                   54 * x[1] ** 2 + 108 * x[1] + 81)
        return 0.001 * val ** 2
class Mishra09(Benchmark):
    r"""
    Mishra 9 objective function.

    This class defines the Mishra 9 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra09}}({x}) = \left[ ab^2c + abc^2 + b^2
        + (x_1 + x_2 - x_3)^2 \right]^2

    Where, in this exercise:

    .. math::

        \begin{cases} a = 2x_1^3 + 5x_1x_2 + 4x_3 - 2x_1^2x_3 - 18 \\
        b = x_1 + x_2^3 + x_1x_2^2 + x_1x_3^2 - 22 \\
        c = 8x_1^2 + 2x_2x_3 + 2x_2^2 + 3x_2^3 - 52 \end{cases}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2, 3]`
    (all three auxiliary polynomials a, b, c vanish there).

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich Personal RePEc Archive, 2006, 1005

    TODO Line 1103
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.global_optimum = [[1.0, 2.0, 3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        a = (2 * x[0] ** 3 + 5 * x[0] * x[1]
             + 4 * x[2] - 2 * x[0] ** 2 * x[2] - 18)
        b = x[0] + x[1] ** 3 + x[0] * x[1] ** 2 + x[0] * x[2] ** 2 - 22.0
        c = (8 * x[0] ** 2 + 2 * x[1] * x[2]
             + 2 * x[1] ** 2 + 3 * x[1] ** 3 - 52)

        return (a * c * b ** 2 + a * b * c ** 2 + b ** 2
                + (x[0] + x[1] - x[2]) ** 2) ** 2
class Mishra10(Benchmark):
    r"""
    Mishra 10 objective function.

    The implemented 2-D form truncates both coordinates to integers and
    compares their sum with their product:

    .. math::

        f_{\text{Mishra10}}(x) = \left[ \lfloor x_1 \rfloor \lfloor x_2
        \rfloor - \lfloor x_1 \rfloor - \lfloor x_2 \rfloor \right]^2

    (the code uses ``int()``, i.e. truncation toward zero, rather than a
    true floor) with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [2, 2]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
        Particle Swarm Methods: Evaluation on Some Benchmark Functions.
        Munich Personal RePEc Archive, 2006, 1005
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[2.0, 2.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        first, second = int(x[0]), int(x[1])
        total = first + second
        product = first * second
        return (total - product) ** 2.0
class Mishra11(Benchmark):
    r"""
    Mishra 11 (AMGM) objective function.

    This class defines the Mishra 11 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Mishra11}}(x) = \left [ \frac{1}{n} \sum_{i=1}^{n} \lvert x_i
        \rvert - \left(\prod_{i=1}^{n} \lvert x_i \rvert \right )^{\frac{1}{n}}
        \right]^2

    i.e. the squared gap between the arithmetic and geometric means of
    :math:`|x_i|`.  Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-3, 3), (-3, 3)]
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        N = self.N
        # BUG FIX: the geometric-mean exponent must bind as (1.0 / N).  The
        # previous expression, ``prod(abs(x)) ** 1.0 / N``, parsed as
        # ``prod(abs(x)) / N`` by operator precedence, which is not the
        # documented formula.  The global optimum (x = 0, f = 0) is unchanged.
        arithmetic_mean = sum(abs(x)) / N
        geometric_mean = prod(abs(x)) ** (1.0 / N)
        return (arithmetic_mean - geometric_mean) ** 2.0
class MultiModal(Benchmark):
    r"""
    MultiModal objective function.

    A separable multimodal problem [1]_:

    .. math::

        f_{\text{MultiModal}}(x) = \left( \sum_{i=1}^n \lvert x_i \rvert
        \right) \left( \prod_{i=1}^n \lvert x_i \rvert \right)

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = [(-5, 5), (-5, 5)]
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        magnitudes = abs(x)
        return sum(magnitudes) * prod(magnitudes)
| 20,910
| 28.043056
| 82
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
|
# -*- coding: utf-8 -*-
from numpy import abs, cos, exp, log, arange, pi, sin, sqrt, sum
from .go_benchmark import Benchmark
class BartelsConn(Benchmark):
    r"""
    Bartels-Conn objective function.

    A 2-D multimodal problem [1]_:

    .. math::

        f_{\text{BartelsConn}}(x) = \lvert {x_1^2 + x_2^2 + x_1 x_2} \rvert +
        \lvert {\sin(x_1)} \rvert + \lvert {\cos(x_2)} \rvert

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 1` for :math:`x = [0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-500., 500.)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 1.0

    def fun(self, x, *args):
        self.nfev += 1
        quadratic = abs(x[0] ** 2.0 + x[1] ** 2.0 + x[0] * x[1])
        ripples = abs(sin(x[0])) + abs(cos(x[1]))
        return quadratic + ripples
class Beale(Benchmark):
    r"""
    Beale objective function.

    A 2-D multimodal problem [1]_:

    .. math::

        f_{\text{Beale}}(x) = (1.5 - x_1 + x_1 x_2)^2
        + (2.25 - x_1 + x_1 x_2^2)^2 + (2.625 - x_1 + x_1 x_2^3)^2

    with :math:`x_i \in [-4.5, 4.5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [3, 0.5]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-4.5, 4.5)] * self.N
        self.global_optimum = [[3.0, 0.5]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Residuals against the three Beale constants.
        r1 = 1.5 - x[0] + x[0] * x[1]
        r2 = 2.25 - x[0] + x[0] * x[1] ** 2
        r3 = 2.625 - x[0] + x[0] * x[1] ** 3
        return r1 ** 2 + r2 ** 2 + r3 ** 2
class BiggsExp02(Benchmark):
    r"""
    BiggsExp02 objective function.

    A nonlinear least-squares problem [1]_:

    .. math::

        \begin{matrix}
        f_{\text{BiggsExp02}}(x) = \sum_{i=1}^{10} (e^{-t_i x_1}
        - 5 e^{-t_i x_2} - y_i)^2 \\
        t_i = 0.1 i \\
        y_i = e^{-t_i} - 5 e^{-10 t_i} \\
        \end{matrix}

    with :math:`x_i \in [0, 20]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0, 20)] * 2
        self.global_optimum = [[1., 10.]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        # Sample points and synthetic targets generated from x = (1, 10).
        t = arange(1, 11.) * 0.1
        y = exp(-t) - 5 * exp(-10 * t)
        residuals = exp(-t * x[0]) - 5 * exp(-t * x[1]) - y
        return sum(residuals ** 2)
class BiggsExp03(Benchmark):
    r"""
    BiggsExp03 objective function.

    A nonlinear least-squares problem [1]_, extending BiggsExp02 with a
    free amplitude on the second exponential:

    .. math::

        \begin{matrix}
        f_{\text{BiggsExp03}}(x) = \sum_{i=1}^{10}
        (e^{-t_i x_1} - x_3 e^{-t_i x_2} - y_i)^2 \\
        t_i = 0.1 i \\
        y_i = e^{-t_i} - 5 e^{-10 t_i} \\
        \end{matrix}

    with :math:`x_i \in [0, 20]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 10, 5]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
        For Global Optimization Problems Int. Journal of Mathematical Modelling
        and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0, 20)] * 3
        self.global_optimum = [[1., 10., 5.]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        # Sample points and synthetic targets generated from x = (1, 10, 5).
        t = arange(1., 11.) * 0.1
        y = exp(-t) - 5 * exp(-10 * t)
        residuals = exp(-t * x[0]) - x[2] * exp(-t * x[1]) - y
        return sum(residuals ** 2)
class BiggsExp04(Benchmark):
    r"""
    BiggsExp04 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{BiggsExp04}}(x) = \sum_{i=1}^{10}
        (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} - y_i)^2,
        \quad t_i = 0.1 i, \quad y_i = e^{-t_i} - 5 e^{-10 t_i}

    with :math:`x_i \in [0, 20]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 10, 1, 5]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.] * 4, [20.] * 4))
        self.global_optimum = [[1., 10., 1., 5.]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1

        # Sample points t_i and target values y_i of the exponential model.
        t = arange(1, 11.) * 0.1
        y = exp(-t) - 5 * exp(-10 * t)
        # x[2] and x[3] scale the two exponential terms.
        residual = x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1]) - y
        return sum(residual ** 2)
class BiggsExp05(Benchmark):
    r"""
    BiggsExp05 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{BiggsExp05}}(x) = \sum_{i=1}^{11}
        (x_3 e^{-t_i x_1} - x_4 e^{-t_i x_2} + 3 e^{-t_i x_5} - y_i)^2,
        \quad t_i = 0.1 i, \quad y_i = e^{-t_i} - 5 e^{-10 t_i} + 3 e^{-4 t_i}

    with :math:`x_i \in [0, 20]` for :math:`i = 1, ..., 5`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 10, 1, 5, 4]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=5):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.] * 5, [20.] * 5))
        self.global_optimum = [[1., 10., 1., 5., 4.]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1

        # Eleven sample points t_i and three-exponential targets y_i.
        t = arange(1, 12.) * 0.1
        y = exp(-t) - 5 * exp(-10 * t) + 3 * exp(-4 * t)
        residual = (x[2] * exp(-t * x[0]) - x[3] * exp(-t * x[1])
                    + 3 * exp(-t * x[4]) - y)
        return sum(residual ** 2)
class Bird(Benchmark):
    r"""
    Bird objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{Bird}}(x) = (x_1 - x_2)^2
        + e^{[1 - \sin(x_1)]^2} \cos(x_2)
        + e^{[1 - \cos(x_2)]^2} \sin(x_1)

    with :math:`x_i \in [-2\pi, 2\pi]`.

    *Global optimum*: :math:`f(x) = -106.7645367198034` at
    :math:`x = [4.701055751981055, 3.152946019601391]` or
    :math:`x = [-1.582142172055011, -3.130246799635430]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-2.0 * pi] * self.N,
                                [2.0 * pi] * self.N))
        self.global_optimum = [[4.701055751981055, 3.152946019601391],
                               [-1.582142172055011, -3.130246799635430]]
        self.fglob = -106.7645367198034

    def fun(self, x, *args):
        self.nfev += 1

        # The two trig factors each appear twice; bind them once.
        s = sin(x[0])
        c = cos(x[1])
        return s * exp((1 - c) ** 2) + c * exp((1 - s) ** 2) + (x[0] - x[1]) ** 2
class Bohachevsky1(Benchmark):
    r"""
    Bohachevsky 1 objective function.

    The Bohachevsky 1 [1]_ global optimization problem is a multimodal
    minimization problem. As implemented here it is two-dimensional:

    .. math::

        f_{\text{Bohachevsky}}(x) = x_1^2 + 2 x_2^2 - 0.3 \cos(3 \pi x_1)
        - 0.4 \cos(4 \pi x_2) + 0.7

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, 2`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: Jamil#17 states the generic n-dimensional sum with bounds
    [-15, 15]; the implementation below is the standard 2-D form on
    [-100, 100].
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Quadratic bowl plus two independent cosine ripples.
        return (x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0])
                - 0.4 * cos(4 * pi * x[1]) + 0.7)
class Bohachevsky2(Benchmark):
    r"""
    Bohachevsky 2 objective function.

    The Bohachevsky 2 [1]_ global optimization problem is a multimodal
    minimization problem. As implemented here it is two-dimensional:

    .. math::

        f_{\text{Bohachevsky}}(x) = x_1^2 + 2 x_2^2
        - 0.3 \cos(3 \pi x_1) \cos(4 \pi x_2) + 0.3

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, 2`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: Jamil#18 is wrong: it shows additive cosines with a 0.4 factor;
    the standard form (implemented below) multiplies the cosines and has
    no 0.4 factor.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Quadratic bowl with a product-of-cosines ripple.
        return (x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0])
                * cos(4 * pi * x[1]) + 0.3)
class Bohachevsky3(Benchmark):
    r"""
    Bohachevsky 3 objective function.

    The Bohachevsky 3 [1]_ global optimization problem is a multimodal
    minimization problem. As implemented here it is two-dimensional:

    .. math::

        f_{\text{Bohachevsky}}(x) = x_1^2 + 2 x_2^2
        - 0.3 \cos(3 \pi x_1 + 4 \pi x_2) + 0.3

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, 2`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: Jamil#19 states a different equation; the implementation below is
    the standard form with a single cosine of the summed phases.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Quadratic bowl with a single coupled cosine ripple.
        return (x[0] ** 2 + 2 * x[1] ** 2
                - 0.3 * cos(3 * pi * x[0] + 4 * pi * x[1]) + 0.3)
class BoxBetts(Benchmark):
    r"""
    BoxBetts objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{BoxBetts}}(x) = \sum_{i=1}^{10} g_i(x)^2, \quad
        g_i(x) = e^{-0.1 i x_1} - e^{-0.1 i x_2}
        - x_3 \left[e^{-0.1 i} - e^{-i}\right]

    with :math:`x_1 \in [0.9, 1.2], x_2 \in [9, 11.2], x_3 \in [0.9, 1.2]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 10, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)

        # Per-variable (asymmetric) box bounds.
        self._bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
        self.global_optimum = [[1.0, 10.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        k = arange(1, 11)
        terms = (exp(-0.1 * k * x[0]) - exp(-0.1 * k * x[1])
                 - (exp(-0.1 * k) - exp(-k)) * x[2])
        return sum(terms ** 2)
class Branin01(Benchmark):
    r"""
    Branin01 objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{Branin01}}(x) = \left(x_2 - \frac{5.1}{4\pi^2} x_1^2
        + \frac{5 x_1}{\pi} - 6\right)^2
        + 10 \left(1 - \frac{1}{8\pi}\right) \cos(x_1) + 10

    with :math:`x_1 \in [-5, 10], x_2 \in [0, 15]`.

    *Global optimum*: :math:`f(x) = 0.39788735772973816` at
    :math:`x = [-\pi, 12.275]`, :math:`x = [\pi, 2.275]` or
    :math:`x = [3\pi, 2.475]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: Jamil#22 lists one of the three minimizers differently.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5., 10.), (0., 15.)]
        self.global_optimum = [[-pi, 12.275], [pi, 2.275], [3 * pi, 2.475]]
        self.fglob = 0.39788735772973816

    def fun(self, x, *args):
        self.nfev += 1

        # Parabolic ridge term, then the cosine modulation.
        ridge = x[1] - (5.1 / (4 * pi ** 2)) * x[0] ** 2 + 5 * x[0] / pi - 6
        return ridge ** 2 + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) + 10
class Branin02(Benchmark):
    r"""
    Branin02 objective function.

    A multimodal minimization problem:

    .. math::

        f_{\text{Branin02}}(x) = \left(x_2 - \frac{5.1}{4\pi^2} x_1^2
        + \frac{5 x_1}{\pi} - 6\right)^2
        + 10 \left(1 - \frac{1}{8\pi}\right) \cos(x_1) \cos(x_2)
        + \log(x_1^2 + x_2^2 + 1) + 10

    with :math:`x_i \in [-5, 15]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 5.559037` at :math:`x = [-3.2, 12.53]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.0, 15.0), (-5.0, 15.0)]
        self.global_optimum = [[-3.1969884, 12.52625787]]
        self.fglob = 5.5589144038938247

    def fun(self, x, *args):
        self.nfev += 1

        # Branin01 ridge plus a product-of-cosines and a log barrier term.
        ridge = x[1] - (5.1 / (4 * pi ** 2)) * x[0] ** 2 + 5 * x[0] / pi - 6
        return (ridge ** 2
                + 10 * (1 - 1 / (8 * pi)) * cos(x[0]) * cos(x[1])
                + log(x[0] ** 2.0 + x[1] ** 2.0 + 1.0) + 10)
class Brent(Benchmark):
    r"""
    Brent objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Brent}}(x) = (x_1 + 10)^2 + (x_2 + 10)^2
        + e^{-x_1^2 - x_2^2}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [-10, -10]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: the solution differs from Jamil#24.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = ([-10, 2], [-10, 2])
        self.global_optimum = [[-10.0, -10.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Shifted quadratic bowl plus a Gaussian bump at the origin.
        shift0 = (x[0] + 10.0) ** 2.0
        shift1 = (x[1] + 10.0) ** 2.0
        return shift0 + shift1 + exp(-x[0] ** 2.0 - x[1] ** 2.0)
class Brown(Benchmark):
    r"""
    Brown objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Brown}}(x) = \sum_{i=1}^{n-1}\left[
        \left(x_i^2\right)^{x_{i+1}^2 + 1}
        + \left(x_{i+1}^2\right)^{x_i^2 + 1}\right]

    with :math:`x_i \in [-1, 4]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-1.0] * self.N, [4.0] * self.N))
        self.custom_bounds = ([-1.0, 1.0], [-1.0, 1.0])

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Pair each element with its successor and sum both cross powers.
        left = x[:-1]
        right = x[1:]
        return sum((left ** 2.0) ** (right ** 2.0 + 1.0)
                   + (right ** 2.0) ** (left ** 2.0 + 1.0))
class Bukin02(Benchmark):
    r"""
    Bukin02 objective function.

    The Bukin02 [1]_ global optimization problem is a multimodal minimization
    problem defined as follows:

    .. math::

        f_{\text{Bukin02}}(x) = 100 (x_2^2 - 0.01 x_1^2 + 1)
        + 0.01 (x_1 + 10)^2

    with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`

    *Global optimum*: :math:`f(x) = -124.75` for :math:`x = [-15, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: I think that Gavana and Jamil are wrong on this function. In both
    sources the x[1] term is not squared. As such there will be a minimum at
    the smallest value of x[1].
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]

        # Registered optimum matches this implementation:
        # f(-15, 0) = 100*(0 - 2.25 + 1) + 0.01*25 = -124.75.
        self.global_optimum = [[-15.0, 0.0]]
        self.fglob = -124.75

    def fun(self, x, *args):
        self.nfev += 1

        # x[1] ** 2 (not x[1]) here follows the published sources; see the
        # class-level TODO about the suspected error in those sources.
        return (100 * (x[1] ** 2 - 0.01 * x[0] ** 2 + 1.0)
                + 0.01 * (x[0] + 10.0) ** 2.0)
class Bukin04(Benchmark):
    r"""
    Bukin04 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Bukin04}}(x) = 100 x_2^2 + 0.01 |x_1 + 10|

    with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [-10, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
        self.global_optimum = [[-10.0, 0.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        quadratic = 100 * x[1] ** 2
        vee = 0.01 * abs(x[0] + 10)
        return quadratic + vee
class Bukin06(Benchmark):
    r"""
    Bukin06 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Bukin06}}(x) = 100 \sqrt{|x_2 - 0.01 x_1^2|}
        + 0.01 |x_1 + 10|

    with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [-10, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
        self.global_optimum = [[-10.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Narrow curved valley along x2 = 0.01 * x1^2.
        valley = 100 * sqrt(abs(x[1] - 0.01 * x[0] ** 2))
        return valley + 0.01 * abs(x[0] + 10)
| 21,664
| 27.506579
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py
|
# -*- coding: utf-8 -*-
from numpy import abs, sum, cos, pi
from .go_benchmark import Benchmark
class YaoLiu04(Benchmark):
    r"""
    Yao-Liu 4 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{YaoLiu04}}(x) = \max_i \left\{ |x_i|, 1 \leq i \leq n \right\}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Yao X., Liu Y. (1997) Fast evolution strategies.
    In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds)
    Evolutionary Programming VI. EP 1997.
    Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg

    .. [2] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    NOTE: Gavana's code and documentation differ (line 1201):
    max(abs(x)) != abs(max(x)). This implementation uses max(abs(x)).
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Infinity norm of x.
        magnitudes = abs(x)
        return magnitudes.max()
class YaoLiu09(Benchmark):
    r"""
    Yao-Liu 9 objective function (Rastrigin form).

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{YaoLiu09}}(x) = \sum_{i=1}^n \left[ x_i^2
        - 10 \cos(2 \pi x_i) + 10 \right]

    with :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Yao X., Liu Y. (1997) Fast evolution strategies.
    In: Angeline P.J., Reynolds R.G., McDonnell J.R., Eberhart R. (eds)
    Evolutionary Programming VI. EP 1997.
    Lecture Notes in Computer Science, vol 1213. Springer, Berlin, Heidelberg

    .. [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        terms = x ** 2.0 - 10.0 * cos(2 * pi * x) + 10
        return sum(terms)
| 2,881
| 29.989247
| 84
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_H.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, arctan2, asarray, cos, exp, arange, pi, sin, sqrt, sum
from .go_benchmark import Benchmark
class Hansen(Benchmark):
    r"""
    Hansen objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Hansen}}(x) = \left[\sum_{i=0}^4 (i+1)\cos(i x_1 + i + 1)
        \right] \left[\sum_{j=0}^4 (j+1)\cos((j+2) x_2 + j + 1)\right]

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -176.54179` at
    :math:`x = [-7.58989583, -7.70831466]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE: Jamil #61 omits the starting value of i.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.global_optimum = [[-7.58989583, -7.70831466]]
        self.fglob = -176.54179

    def fun(self, x, *args):
        self.nfev += 1

        k = arange(5.)
        # Product of two independent cosine series, one per coordinate.
        series_x = (k + 1) * cos(k * x[0] + k + 1)
        series_y = (k + 1) * cos((k + 2) * x[1] + k + 1)
        return sum(series_x) * sum(series_y)
class Hartmann3(Benchmark):
    r"""
    Hartmann3 objective function.

    This class defines the Hartmann3 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Hartmann3}}(x) = -\sum\limits_{i=1}^{4} c_i
        e^{-\sum\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}

    Where, in this exercise:

    .. math::

        \begin{array}{l|ccc|c|ccr}
        \hline
        i & & a_{ij}& & c_i & & p_{ij} & \\
        \hline
        1 & 3.0 & 10.0 & 30.0 & 1.0 & 0.3689 & 0.1170 & 0.2673 \\
        2 & 0.1 & 10.0 & 35.0 & 1.2 & 0.4699 & 0.4387 & 0.7470 \\
        3 & 3.0 & 10.0 & 30.0 & 3.0 & 0.1091 & 0.8732 & 0.5547 \\
        4 & 0.1 & 10.0 & 35.0 & 3.2 & 0.03815 & 0.5743 & 0.8828 \\
        \hline
        \end{array}

    with :math:`x_i \in [0, 1]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = -3.8627821478`
    for :math:`x = [0.11461292, 0.55564907, 0.85254697]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Jamil #62 has an incorrect coefficient. p[1, 1] should be 0.4387
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
        self.global_optimum = [[0.11461292, 0.55564907, 0.85254697]]
        self.fglob = -3.8627821478
        # Width matrix a (4 x 3), center matrix p (4 x 3) and weights c (4,)
        # of the four Gaussian-like terms; values are the standard Hartmann3
        # coefficients tabulated in the docstring above.
        self.a = asarray([[3.0, 10., 30.],
                          [0.1, 10., 35.],
                          [3.0, 10., 30.],
                          [0.1, 10., 35.]])
        self.p = asarray([[0.3689, 0.1170, 0.2673],
                          [0.4699, 0.4387, 0.7470],
                          [0.1091, 0.8732, 0.5547],
                          [0.03815, 0.5743, 0.8828]])
        self.c = asarray([1., 1.2, 3., 3.2])

    def fun(self, x, *args):
        self.nfev += 1

        # Broadcast x against each row of p; d holds the four weighted
        # squared distances, one per Gaussian term.
        XX = np.atleast_2d(x)
        d = sum(self.a * (XX - self.p) ** 2, axis=1)
        return -sum(self.c * exp(-d))
class Hartmann6(Benchmark):
    r"""
    Hartmann6 objective function.

    This class defines the Hartmann6 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Hartmann6}}(x) = -\sum\limits_{i=1}^{4} c_i
        e^{-\sum\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}

    Where, in this exercise:

    .. math::

        \begin{array}{l|cccccc|r}
        \hline
        i & & & a_{ij} & & & & c_i \\
        \hline
        1 & 10.0 & 3.0 & 17.0 & 3.50 & 1.70 & 8.00 & 1.0 \\
        2 & 0.05 & 10.0 & 17.0 & 0.10 & 8.00 & 14.00 & 1.2 \\
        3 & 3.00 & 3.50 & 1.70 & 10.0 & 17.00 & 8.00 & 3.0 \\
        4 & 17.00 & 8.00 & 0.05 & 10.00 & 0.10 & 14.00 & 3.2 \\
        \hline
        \end{array}

        \newline
        \
        \newline

        \begin{array}{l|cccccr}
        \hline
        i & & & p_{ij} & & & \\
        \hline
        1 & 0.1312 & 0.1696 & 0.5569 & 0.0124 & 0.8283 & 0.5886 \\
        2 & 0.2329 & 0.4135 & 0.8307 & 0.3736 & 0.1004 & 0.9991 \\
        3 & 0.2348 & 0.1451 & 0.3522 & 0.2883 & 0.3047 & 0.6650 \\
        4 & 0.4047 & 0.8828 & 0.8732 & 0.5743 & 0.1091 & 0.0381 \\
        \hline
        \end{array}

    with :math:`x_i \in [0, 1]` for :math:`i = 1, ..., 6`.

    *Global optimum*: :math:`f(x_i) = -3.32236801141551` for
    :math:`{x} = [0.20168952, 0.15001069, 0.47687398, 0.27533243, 0.31165162,
    0.65730054]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=6):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
        self.global_optimum = [[0.20168952, 0.15001069, 0.47687398, 0.27533243,
                                0.31165162, 0.65730054]]
        self.fglob = -3.32236801141551
        # Width matrix a (4 x 6), center matrix p (4 x 6) and weights c (4,)
        # of the four Gaussian-like terms; values are the standard Hartmann6
        # coefficients tabulated in the docstring above.
        self.a = asarray([[10., 3., 17., 3.5, 1.7, 8.],
                          [0.05, 10., 17., 0.1, 8., 14.],
                          [3., 3.5, 1.7, 10., 17., 8.],
                          [17., 8., 0.05, 10., 0.1, 14.]])
        self.p = asarray([[0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
                          [0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
                          [0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.665],
                          [0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381]])
        self.c = asarray([1.0, 1.2, 3.0, 3.2])

    def fun(self, x, *args):
        self.nfev += 1

        # Broadcast x against each row of p; d holds the four weighted
        # squared distances, one per Gaussian term.
        XX = np.atleast_2d(x)
        d = sum(self.a * (XX - self.p) ** 2, axis=1)
        return -sum(self.c * exp(-d))
class HelicalValley(Benchmark):
    r"""
    HelicalValley objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{HelicalValley}}(x) = x_3^2 + 100 \left[
        (x_3 - 10\Psi(x_1, x_2))^2 + (\sqrt{x_1^2 + x_2^2} - 1)^2 \right]

    where :math:`2\pi\Psi(x, y) = \arctan2(y, x)`, with
    :math:`x_i \in [-10, 10]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 0, 0]`

    .. [1] Fletcher, R. & Powell, M. A Rapidly Convergent Descent Method for
    Minimzation, Computer Journal, 1963, 62, 163-168

    NOTE: Jamil's equation differs from the original reference, available at
    http://galton.uchicago.edu/~lekheng/courses/302/classics/
    fletcher-powell.pdf
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.] * self.N, [10.] * self.N))
        self.global_optimum = [[1.0, 0.0, 0.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Cylindrical radius and normalized angle of (x0, x1).
        radius = sqrt(x[0] ** 2 + x[1] ** 2)
        angle = 1 / (2. * pi) * arctan2(x[1], x[0])
        return x[2] ** 2 + 100 * ((x[2] - 10 * angle) ** 2 + (radius - 1) ** 2)
class HimmelBlau(Benchmark):
    r"""
    HimmelBlau objective function.

    This class defines the HimmelBlau [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{HimmelBlau}}({x}) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2` (matching the bounds
    used in the implementation below; Jamil states [-6, 6]).

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [3, 2]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-5.] * self.N, [5.] * self.N))
        self.global_optimum = [[3.0, 2.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Sum of two squared quadratic residuals.
        return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2
class HolderTable(Benchmark):
    r"""
    HolderTable objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{HolderTable}}(x) = -\left| \sin(x_1) \cos(x_2)
        e^{\left| 1 - \sqrt{x_1^2 + x_2^2} / \pi \right|} \right|

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -19.20850256788675` for
    :math:`x_i = \pm 9.664590028909654` for :math:`i = 1, 2`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    NOTE: Jamil #146 is missing the squares on the x1 and x2 terms;
    Gavana has the correct form, which is implemented here.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.global_optimum = [(8.055023472141116, 9.664590028909654),
                               (-8.055023472141116, 9.664590028909654),
                               (8.055023472141116, -9.664590028909654),
                               (-8.055023472141116, -9.664590028909654)]
        self.fglob = -19.20850256788675

    def fun(self, x, *args):
        self.nfev += 1

        trig = sin(x[0]) * cos(x[1])
        envelope = exp(abs(1 - sqrt(x[0] ** 2 + x[1] ** 2) / pi))
        return -abs(trig * envelope)
class Hosaki(Benchmark):
    r"""
    Hosaki objective function.

    This class defines the Hosaki [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Hosaki}}(x) = \left ( 1 - 8 x_1 + 7 x_1^2 - \frac{7}{3} x_1^3
        + \frac{1}{4} x_1^4 \right ) x_2^2 e^{-x_2}

    with :math:`x_1 \in [0, 5], x_2 \in [0, 6]` (matching the bounds used
    in the implementation below; Jamil states :math:`x_i \in [0, 10]` and
    :math:`e^{-x_1}` — the exponential argument here is :math:`x_2`).

    *Global optimum*: :math:`f(x) = -2.3458115` for :math:`x = [4, 2]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = ([0., 5.], [0., 6.])
        self.custom_bounds = [(0, 5), (0, 5)]

        self.global_optimum = [[4, 2]]
        self.fglob = -2.3458115

    def fun(self, x, *args):
        self.nfev += 1

        # Quartic polynomial in x[0], damped by x[1]^2 * e^{-x[1]}.
        val = (1 - 8 * x[0] + 7 * x[0] ** 2 - 7 / 3. * x[0] ** 3
               + 0.25 * x[0] ** 4)
        return val * x[1] ** 2 * exp(-x[1])
| 11,278
| 28.99734
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_Q.py
|
# -*- coding: utf-8 -*-
from numpy import abs, sum, arange, sqrt
from .go_benchmark import Benchmark
class Qing(Benchmark):
    r"""
    Qing objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Qing}}(x) = \sum_{i=1}^{n} (x_i^2 - i)^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = \pm \sqrt{i}` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-500.0] * self.N,
                                [500.0] * self.N))
        self.custom_bounds = [(-2, 2), (-2, 2)]
        self.global_optimum = [[sqrt(i) for i in range(1, self.N + 1)]]
        self.fglob = 0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        indices = arange(1, self.N + 1)
        return sum((x ** 2.0 - indices) ** 2.0)
class Quadratic(Benchmark):
    r"""
    Quadratic objective function.

    This class defines the Quadratic [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Quadratic}}(x) = -3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2
        + 203.64x_2^2 + 182.25x_1x_2

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -3873.72418` for
    :math:`x = [0.19388, 0.48513]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(0, 1), (0, 1)]

        self.global_optimum = [[0.19388, 0.48513]]
        self.fglob = -3873.72418
        # NOTE(review): the function below only ever reads x[0] and x[1],
        # so this flag looks inconsistent with a fixed 2-D problem — confirm
        # whether the benchmark driver should really vary its dimensionality.
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Fixed bivariate quadratic form; extra components of x are ignored.
        return (-3803.84 - 138.08 * x[0] - 232.92 * x[1] + 128.08 * x[0] ** 2.0
                + 203.64 * x[1] ** 2.0 + 182.25 * x[0] * x[1])
class Quintic(Benchmark):
    r"""
    Quintic objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Quintic}}(x) = \sum_{i=1}^{n} \left| x_i^5 - 3 x_i^4
        + 4 x_i^3 + 2 x_i^2 - 10 x_i - 4 \right|

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = -1` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-2, 2), (-2, 2)]

        self.global_optimum = [[-1.0 for _ in range(self.N)]]
        self.fglob = 0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Element-wise quintic polynomial; sum of absolute values.
        poly = x ** 5 - 3 * x ** 4 + 4 * x ** 3 + 2 * x ** 2 - 10 * x - 4
        return sum(abs(poly))
| 3,859
| 29.634921
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_L.py
|
# -*- coding: utf-8 -*-
from numpy import sum, cos, exp, pi, arange, sin
from .go_benchmark import Benchmark
class Langermann(Benchmark):
    r"""
    Langermann objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Langermann}}(x) = -\sum_{i=1}^{5} \frac{c_i
        \cos\left\{\pi \left[(x_1 - a_i)^2 + (x_2 - b_i)^2\right]\right\}}
        {e^{\left[(x_1 - a_i)^2 + (x_2 - b_i)^2\right] / \pi}}

    with :math:`a = [3, 5, 2, 1, 7]`, :math:`b = [5, 2, 1, 4, 9]`,
    :math:`c = [1, 2, 5, 2, 3]` and :math:`x_i \in [0, 10]` for
    :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -5.1621259` at
    :math:`x = [2.00299219, 1.006096]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    NOTE: Langermann from Gavana is _not the same_ as Jamil #68.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))
        self.global_optimum = [[2.00299219, 1.006096]]
        self.fglob = -5.1621259

    def fun(self, x, *args):
        self.nfev += 1

        a = [3, 5, 2, 1, 7]
        b = [5, 2, 1, 4, 9]
        c = [1, 2, 5, 2, 3]

        # Squared distance from x to each of the five centers (a_i, b_i);
        # the same quantity drives both the Gaussian decay and the cosine.
        dist2 = (x[0] - a) ** 2 + (x[1] - b) ** 2
        return -sum(c * exp(-(1 / pi) * dist2) * cos(pi * dist2))
class LennardJones(Benchmark):
    r"""
    LennardJones objective function.
    This class defines the Lennard-Jones global optimization problem. This
    is a multimodal minimization problem defined as follows:
    .. math::
        f_{\text{LennardJones}}(\mathbf{x}) = \sum_{i=0}^{n-2}\sum_{j>1}^{n-1}
        \frac{1}{r_{ij}^{12}} - \frac{1}{r_{ij}^{6}}
    Where, in this exercise:
    .. math::
        r_{ij} = \sqrt{(x_{3i}-x_{3j})^2 + (x_{3i+1}-x_{3j+1})^2)
        + (x_{3i+2}-x_{3j+2})^2}
    Valid for any dimension, :math:`n = 3*k, k=2 , 3, 4, ..., 20`. :math:`k`
    is the number of atoms in 3-D space constraints: unconstrained type:
    multi-modal with one global minimum; non-separable
    Value-to-reach: :math:`minima[k-2] + 0.0001`. See array of minima below;
    additional minima available at the Cambridge cluster database:
    http://www-wales.ch.cam.ac.uk/~jon/structures/LJ/tables.150.html
    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-4, 4]` for :math:`i = 1 ,..., n`.
    *Global optimum*:
    .. math::
        \text{minima} = [-1.,-3.,-6.,-9.103852,-12.712062,-16.505384,\\
                        -19.821489, -24.113360, -28.422532,-32.765970,\\
                        -37.967600,-44.326801, -47.845157,-52.322627,\\
                        -56.815742,-61.317995, -66.530949, -72.659782,\\
                        -77.1777043]\\
    """
    def __init__(self, dimensions=6):
        # dimensions is in [6:60]
        # max dimensions is going to be 60.
        # Dimensions must be a multiple-of-3 capable range (3 coordinates
        # per atom); k = dimensions // 3 atoms, k in [2, 20].
        if dimensions not in range(6, 61):
            raise ValueError("LJ dimensions must be in (6, 60)")
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-4.0] * self.N, [4.0] * self.N))
        self.global_optimum = [[]]
        # Known cluster minima for k = 2 .. 20 atoms (index k - 2).
        self.minima = [-1.0, -3.0, -6.0, -9.103852, -12.712062,
                       -16.505384, -19.821489, -24.113360, -28.422532,
                       -32.765970, -37.967600, -44.326801, -47.845157,
                       -52.322627, -56.815742, -61.317995, -66.530949,
                       -72.659782, -77.1777043]
        k = int(dimensions / 3)
        self.fglob = self.minima[k - 2]
        self.change_dimensionality = True
    def change_dimensions(self, ndim):
        # Re-validate and update fglob when the atom count changes.
        if ndim not in range(6, 61):
            raise ValueError("LJ dimensions must be in (6, 60)")
        Benchmark.change_dimensions(self, ndim)
        self.fglob = self.minima[int(self.N / 3) - 2]
    def fun(self, x, *args):
        self.nfev += 1
        k = int(self.N / 3)
        s = 0.0
        # Pairwise sum over all unordered atom pairs (i, j), i < j.
        for i in range(k - 1):
            for j in range(i + 1, k):
                a = 3 * i
                b = 3 * j
                xd = x[a] - x[b]
                yd = x[a + 1] - x[b + 1]
                zd = x[a + 2] - x[b + 2]
                # ed = r^2, ud = r^6; (1/ud - 2)/ud = 1/r^12 - 2/r^6.
                ed = xd * xd + yd * yd + zd * zd
                ud = ed * ed * ed
                # Guard against coincident atoms (zero distance).
                if ed > 0.0:
                    s += (1.0 / ud - 2.0) / ud
        return s
class Leon(Benchmark):
    r"""
    Leon objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Leon}}(x) = (1 - x_1)^2 + 100 (x_2 - x_1^2)^2

    with :math:`x_i \in [-1.2, 1.2]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-1.2] * self.N, [1.2] * self.N))
        self.global_optimum = [[1] * self.N]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Rosenbrock-style curved valley with a steep parabolic ridge.
        ridge = x[1] - x[0] ** 2.0
        return 100. * ridge ** 2.0 + (1 - x[0]) ** 2.0
class Levy03(Benchmark):
    r"""
    Levy 3 objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Levy03}}(\mathbf{x}) = \sin^2(\pi y_1)
        + \sum_{i=1}^{n-1}(y_i - 1)^2 [1 + 10\sin^2(\pi y_{i+1})]
        + (y_n - 1)^2, \qquad y_i = 1 + \frac{x_i - 1}{4}

    with :math:`x_i \in [-10, 10]`.

    *Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1`,
    :math:`i = 1, ..., n`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO: not clear what the Levy function definition is. Gavana, Mishra,
    Adorio have different forms. Indeed Levy 3 docstring from Gavana
    disagrees with the Gavana code! The following code is from the Mishra
    listing of Levy08.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-5, 5), (-5, 5)]
        self.global_optimum = [[1 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        y = 1 + (x - 1) / 4
        # Coupled middle terms pair each y_i with its successor y_{i+1}.
        middle = sum((y[:-1] - 1) ** 2 * (1 + 10 * sin(pi * y[1:]) ** 2))
        tail = (y[-1] - 1) ** 2
        return sin(pi * y[0]) ** 2 + middle + tail
class Levy05(Benchmark):
    r"""
    Levy 5 objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Levy05}}(\mathbf{x}) =
        \sum_{i=1}^{5} i \cos[(i-1)x_1 + i]
        \times \sum_{j=1}^{5} j \cos[(j+1)x_2 + j]
        + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2

    with :math:`x_i \in [-10, 10]`.

    *Global optimum*: :math:`f(x_i) = -176.1375779` for
    :math:`\mathbf{x} = [-1.30685, -1.42485]`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
        self.global_optimum = [[-1.30685, -1.42485]]
        self.fglob = -176.1375779

    def fun(self, x, *args):
        self.nfev += 1
        k = arange(1, 6)
        # Two independent cosine series, one per coordinate.
        series1 = k * cos((k - 1) * x[0] + k)
        series2 = k * cos((k + 1) * x[1] + k)
        quad = (x[0] + 1.42513) ** 2 + (x[1] + 0.80032) ** 2
        return sum(series1) * sum(series2) + quad
class Levy13(Benchmark):
    r"""
    Levy13 objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Levy13}}(x) = \sin^2(3 \pi x_1)
        + (x_1 - 1)^2 [1 + \sin^2(3 \pi x_2)]
        + (x_2 - 1)^2 [1 + \sin^2(2 \pi x_2)]

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 1]`

    .. [1] Mishra, S. Some new test functions for global optimization and
    performance of repulsive particle swarm method.
    Munich Personal RePEc Archive, 2006, 2718
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-5, 5), (-5, 5)]
        self.global_optimum = [[1 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        oscillation = sin(3 * pi * x[0]) ** 2
        term1 = (x[0] - 1) ** 2 * (1 + (sin(3 * pi * x[1])) ** 2)
        term2 = (x[1] - 1) ** 2 * (1 + (sin(2 * pi * x[1])) ** 2)
        return oscillation + term1 + term2
| 9,861
| 28.975684
| 184
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_I.py
|
# -*- coding: utf-8 -*-
from numpy import sin, sum
from .go_benchmark import Benchmark
class Infinity(Benchmark):
    r"""
    Infinity objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Infinity}}(x) = \sum_{i=1}^{n} x_i^{6}
        \left[\sin\left(\frac{1}{x_i}\right) + 2\right]

    with :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n` (the optimiser is stored as 1e-16 because the
    objective divides by :math:`x_i`).

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
        self.global_optimum = [[1e-16 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # x^6 envelope damps a rapidly oscillating sin(1/x) term near 0.
        envelope = x ** 6.0
        oscillation = sin(1.0 / x) + 2.0
        return sum(envelope * oscillation)
| 1,115
| 25.571429
| 77
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_T.py
|
# -*- coding: utf-8 -*-
from numpy import abs, asarray, cos, exp, arange, pi, sin, sum, atleast_2d
from .go_benchmark import Benchmark
class TestTubeHolder(Benchmark):
    r"""
    TestTubeHolder objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{TestTubeHolder}}(x) = -4 \left|
        e^{\left|\cos\left(\frac{x_1^2 + x_2^2}{200}\right)\right|}
        \sin(x_1) \cos(x_2) \right|

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -10.872299901558` for
    :math:`x = [-\pi/2, 0]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO Jamil#148 has got incorrect equation, missing an abs around the
    square brackets
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.global_optimum = [[-pi / 2, 0.0]]
        self.fglob = -10.87229990155800

    def fun(self, x, *args):
        self.nfev += 1
        amplitude = sin(x[0]) * cos(x[1])
        radial = (x[0] ** 2 + x[1] ** 2) / 200
        return -4 * abs(amplitude * exp(abs(cos(radial))))
class Thurber(Benchmark):
    r"""
    Thurber [1]_ objective function.

    Least-squares fit of the NIST StRD "Thurber" semiconductor dataset:
    a rational model with cubic numerator and cubic denominator in the
    predictor ``b``, parameterised by the seven coefficients in ``x``.
    The objective is the sum of squared residuals against ``a``.

    .. [1] https://www.itl.nist.gov/div898/strd/nls/data/thurber.shtml
    """
    def __init__(self, dimensions=7):
        Benchmark.__init__(self, dimensions)
        # Per-coefficient box constraints (lower, upper).
        self._bounds = list(zip(
            [500., 500., 100., 10., 0.1, 0.1, 0.],
            [2000., 2000., 1000., 150., 2., 1., 0.2]))
        # NIST certified parameter values.
        self.global_optimum = [[1.288139680e3, 1.4910792535e3, 5.8323836877e2,
                                75.416644291, 0.96629502864, 0.39797285797,
                                4.9727297349e-2]]
        # NIST certified residual sum of squares at the optimum.
        self.fglob = 5642.7082397
        # Observed responses (electron mobility).
        self.a = asarray([80.574, 84.248, 87.264, 87.195, 89.076, 89.608,
                          89.868, 90.101, 92.405, 95.854, 100.696, 101.06,
                          401.672, 390.724, 567.534, 635.316, 733.054, 759.087,
                          894.206, 990.785, 1090.109, 1080.914, 1122.643,
                          1178.351, 1260.531, 1273.514, 1288.339, 1327.543,
                          1353.863, 1414.509, 1425.208, 1421.384, 1442.962,
                          1464.350, 1468.705, 1447.894, 1457.628])
        # Predictor values (log of density).
        self.b = asarray([-3.067, -2.981, -2.921, -2.912, -2.840, -2.797,
                          -2.702, -2.699, -2.633, -2.481, -2.363, -2.322,
                          -1.501, -1.460, -1.274, -1.212, -1.100, -1.046,
                          -0.915, -0.714, -0.566, -0.545, -0.400, -0.309,
                          -0.109, -0.103, 0.010, 0.119, 0.377, 0.790, 0.963,
                          1.006, 1.115, 1.572, 1.841, 2.047, 2.200])
    def fun(self, x, *args):
        self.nfev += 1
        # Rational model: (x0 + x1*b + x2*b^2 + x3*b^3)
        #               / (1 + x4*b + x5*b^2 + x6*b^3)
        vec = x[0] + x[1] * self.b + x[2] * self.b ** 2 + x[3] * self.b ** 3
        vec /= 1 + x[4] * self.b + x[5] * self.b ** 2 + x[6] * self.b ** 3
        return sum((self.a - vec) ** 2)
class Treccani(Benchmark):
    r"""
    Treccani objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Treccani}}(x) = x_1^4 + 4x_1^3 + 4x_1^2 + x_2^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [-2, 0]` or
    :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
        self.custom_bounds = [(-2, 2), (-2, 2)]
        self.global_optimum = [[-2.0, 0.0]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        # Quartic in x1 (two minima at x1 = -2 and x1 = 0), quadratic in x2.
        quartic = x[0] ** 4 + 4.0 * x[0] ** 3 + 4.0 * x[0] ** 2
        return quartic + x[1] ** 2
class Trefethen(Benchmark):
    r"""
    Trefethen objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Trefethen}}(x) = 0.25 x_1^2 + 0.25 x_2^2
        + e^{\sin(50 x_1)} - \sin(10 x_1 + 10 x_2)
        + \sin(60 e^{x_2}) + \sin[70 \sin(x_1)]
        + \sin[\sin(80 x_2)]

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -3.3068686474` for
    :math:`x = [-0.02440307923, 0.2106124261]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.custom_bounds = [(-5, 5), (-5, 5)]
        self.global_optimum = [[-0.02440307923, 0.2106124261]]
        self.fglob = -3.3068686474

    def fun(self, x, *args):
        self.nfev += 1
        # Quadratic bowl plus five highly-oscillatory trigonometric terms,
        # accumulated in the same order they appear in the formula.
        total = 0.25 * x[0] ** 2 + 0.25 * x[1] ** 2
        total += exp(sin(50. * x[0])) - sin(10 * x[0] + 10 * x[1])
        total += sin(60 * exp(x[1]))
        total += sin(70 * sin(x[0]))
        total += sin(sin(80 * x[1]))
        return total
class ThreeHumpCamel(Benchmark):
    r"""
    Three Hump Camel objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{ThreeHumpCamel}}(x) = 2x_1^2 - 1.05x_1^4
        + \frac{x_1^6}{6} + x_1 x_2 + x_2^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
        self.custom_bounds = [(-2, 2), (-1.5, 1.5)]
        self.global_optimum = [[0.0, 0.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Sextic "camel" profile in x1, coupled to x2 by a bilinear term.
        humps = 2.0 * x[0] ** 2.0 - 1.05 * x[0] ** 4.0 + x[0] ** 6 / 6.0
        return humps + x[0] * x[1] + x[1] ** 2.0
class Trid(Benchmark):
    r"""
    Trid objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Trid}}(x) = \sum_{i=1}^{n} (x_i - 1)^2
        - \sum_{i=2}^{n} x_i x_{i-1}

    with :math:`x_i \in [-20, 20]` for :math:`i = 1, ..., 6`.

    *Global optimum*: :math:`f(x) = -50` for
    :math:`x = [6, 10, 12, 12, 10, 6]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Jamil#150, starting index of second summation term should be 2.
    """

    def __init__(self, dimensions=6):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))
        self.global_optimum = [[6, 10, 12, 12, 10, 6]]
        self.fglob = -50.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        squared_error = sum((x - 1.0) ** 2.0)
        neighbour_coupling = sum(x[1:] * x[:-1])
        return squared_error - neighbour_coupling
class Trigonometric01(Benchmark):
    r"""
    Trigonometric 1 objective function.
    This class defines the Trigonometric 1 [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:
    .. math::
        f_{\text{Trigonometric01}}(x) = \sum_{i=1}^{n} \left [n -
                                        \sum_{j=1}^{n} \cos(x_j)
                                        + i \left(1 - cos(x_i)
                                        - sin(x_i) \right ) \right]^2
    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [0, \pi]` for :math:`i = 1, ..., n`.
    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`
    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    TODO: equaiton uncertain here. Is it just supposed to be the cos term
    in the inner sum, or the whole of the second line in Jamil #153.
    """
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0] * self.N, [pi] * self.N))
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True
    def fun(self, x, *args):
        self.nfev += 1
        # Column vector 1..N, so `inner` broadcasts to an (N, N) matrix:
        # row i holds cos(x_j) + i * (1 - cos(x_i?) - sin(x_i?)) terms.
        i = atleast_2d(arange(1.0, self.N + 1)).T
        inner = cos(x) + i * (1 - cos(x) - sin(x))
        # Row sums collapse the inner summation; the outer sum squares
        # the per-row residual (N - row_sum).
        return sum((self.N - sum(inner, axis=1)) ** 2)
class Trigonometric02(Benchmark):
    r"""
    Trigonometric 2 objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Trigonometric2}}(x) = 1 + \sum_{i=1}^{n}
        8 \sin^2[7(x_i - 0.9)^2] + 6 \sin^2[14(x_i - 0.9)^2]
        + (x_i - 0.9)^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 1` for :math:`x_i = 0.9` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-500.0] * self.N,
                                [500.0] * self.N))
        self.custom_bounds = [(0, 2), (0, 2)]
        self.global_optimum = [[0.9 for _ in range(self.N)]]
        self.fglob = 1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # Squared displacement from the optimiser 0.9, reused in all terms.
        d = (x - 0.9) ** 2
        terms = 8 * sin(7 * d) ** 2 + 6 * sin(14 * d) ** 2 + d
        return 1.0 + sum(terms)
class Tripod(Benchmark):
    r"""
    Tripod objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Tripod}}(x) = p(x_2) [1 + p(x_1)]
        + \lvert x_1 + 50 p(x_2) [1 - 2 p(x_1)] \rvert
        + \lvert x_2 + 50 [1 - 2 p(x_2)] \rvert

    where :math:`p(u) = 1` if :math:`u \geq 0` and 0 otherwise, with
    :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, -50]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.global_optimum = [[0.0, -50.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Sign indicators: 1.0 in the non-negative half-plane, else 0.0.
        p1 = float(x[0] >= 0)
        p2 = float(x[1] >= 0)
        quadrant_penalty = p2 * (1.0 + p1)
        horiz = abs(x[0] + 50.0 * p2 * (1.0 - 2.0 * p1))
        vert = abs(x[1] + 50.0 * (1.0 - 2.0 * p2))
        return quadrant_penalty + horiz + vert
| 12,710
| 31.592308
| 82
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_G.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, sin, cos, exp, floor, log, arange, prod, sqrt, sum
from .go_benchmark import Benchmark
class Gear(Benchmark):
    r"""
    Gear objective function (four-dimensional, multimodal).

    .. math::

        f_{\text{Gear}}({x}) = \left\{\frac{1.0}{6.931}
        - \frac{\lfloor x_1 \rfloor \lfloor x_2 \rfloor}
               {\lfloor x_3 \rfloor \lfloor x_4 \rfloor}\right\}^2

    with :math:`x_i \in [12, 60]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 2.7 \cdot 10^{-12}` for :math:`x =
    [16, 19, 43, 49]`, where the various :math:`x_i` may be permuted.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([12.0] * self.N, [60.0] * self.N))
        self.global_optimum = [[16, 19, 43, 49]]
        self.fglob = 2.7e-12

    def fun(self, x, *args):
        self.nfev += 1
        # Gear-tooth counts are integral, hence the floor of each variable.
        gear_ratio = floor(x[0]) * floor(x[1]) / floor(x[2]) / floor(x[3])
        return (1. / 6.931 - gear_ratio) ** 2
class Giunta(Benchmark):
    r"""
    Giunta objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{Giunta}}({x}) = 0.6 + \sum_{i=1}^{n}
        \left[\sin^{2}\left(1 - \frac{16}{15} x_i\right)
        - \frac{1}{50} \sin\left(4 - \frac{64}{15} x_i\right)
        - \sin\left(1 - \frac{16}{15} x_i\right)\right]

    with :math:`x_i \in [-1, 1]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.06447042053690566` for
    :math:`x = [0.4673200277395354, 0.4673200169591304]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Jamil has the wrong fglob. I think there is a lower value.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
        self.global_optimum = [[0.4673200277395354, 0.4673200169591304]]
        self.fglob = 0.06447042053690566

    def fun(self, x, *args):
        self.nfev += 1
        # arg = -(1 - 16 x / 15), which absorbs the minus signs of the
        # docstring's sin terms.
        arg = 16 * x / 15.0 - 1
        s = sin(arg)
        return 0.6 + sum(s + s ** 2 + sin(4 * arg) / 50.)
class GoldsteinPrice(Benchmark):
    r"""
    Goldstein-Price objective function (two-dimensional, multimodal).

    .. math::

        f_{\text{GoldsteinPrice}}(x) = \left[ 1 + (x_1 + x_2 + 1)^2
        (19 - 14 x_1 + 3 x_1^2 - 14 x_2 + 6 x_1 x_2 + 3 x_2^2) \right]
        \left[ 30 + (2x_1 - 3 x_2)^2 (18 - 32 x_1 + 12 x_1^2
        + 48 x_2 - 36 x_1 x_2 + 27 x_2^2) \right]

    with :math:`x_i \in [-2, 2]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 3` for :math:`x = [0, -1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))
        self.global_optimum = [[0., -1.]]
        self.fglob = 3.0

    def fun(self, x, *args):
        self.nfev += 1
        # First factor: 1 + (x1 + x2 + 1)^2 * quadratic.
        factor1 = (1 + (x[0] + x[1] + 1) ** 2
                   * (19 - 14 * x[0] + 3 * x[0] ** 2
                      - 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2))
        # Second factor: 30 + (2 x1 - 3 x2)^2 * quadratic.
        factor2 = (30 + (2 * x[0] - 3 * x[1]) ** 2
                   * (18 - 32 * x[0] + 12 * x[0] ** 2
                      + 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2))
        return factor1 * factor2
class Griewank(Benchmark):
    r"""
    Griewank objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Griewank}}(x) = \frac{1}{4000}\sum_{i=1}^n x_i^2
        - \prod_{i=1}^n \cos\left(\frac{x_i}{\sqrt{i}}\right) + 1

    with :math:`x_i \in [-600, 600]` for :math:`i = 1, ..., n`
    (bounds of [-100, 100] are used here).

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-50, 50), (-50, 50)]
        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        idx = arange(1., np.size(x) + 1.)
        bowl = sum(x ** 2 / 4000)
        ripple = prod(cos(x / sqrt(idx)))
        return bowl - ripple + 1
class Gulf(Benchmark):
    r"""
    Gulf research objective function (three-dimensional, multimodal).

    .. math::

        f_{\text{Gulf}}(x) = \sum_{i=1}^{99} \left( e^{-\frac{\lvert y_i
        - x_2 \rvert^{x_3}}{x_1}} - t_i \right)^2

    where :math:`t_i = i/100` and :math:`y_i = 25 + [-50 \log(t_i)]^{2/3}`,
    with bounds :math:`x_i \in [0, 50]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [50, 25, 1.5]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO Gavana has absolute of (u - x[1]) term. Jamil doesn't... Leaving
    it in.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0] * self.N, [50.0] * self.N))
        self.global_optimum = [[50.0, 25.0, 1.5]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        m = 99.
        i = arange(1., m + 1)
        # Target ordinates y_i = 25 + [-50 log(i/100)]^(2/3).
        y = 25 + (-50 * log(i / 100.)) ** (2 / 3.)
        residuals = (exp(-((abs(y - x[1])) ** x[2] / x[0])) - i / 100.)
        return sum(residuals ** 2)
| 6,489
| 28.103139
| 81
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_W.py
|
# -*- coding: utf-8 -*-
from numpy import atleast_2d, arange, sum, cos, exp, pi
from .go_benchmark import Benchmark
class Watson(Benchmark):
    r"""
    Watson objective function.
    This class defines the Watson [1]_ global optimization problem. This is a
    unimodal minimization problem defined as follows:
    .. math::
        f_{\text{Watson}}(x) = \sum_{i=0}^{29} \left\{
                               \sum_{j=0}^4 ((j + 1)a_i^j x_{j+1})
                               - \left[ \sum_{j=0}^5 a_i^j
                               x_{j+1} \right ]^2 - 1 \right\}^2
                               + x_1^2
    Where, in this exercise, :math:`a_i = i/29`.
    with :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., 6`.
    *Global optimum*: :math:`f(x) = 0.002288` for
    :math:`x = [-0.0158, 1.012, -0.2329, 1.260, -1.513, 0.9928]`
    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    TODO Jamil #161 writes equation using (j - 1). According to code in Adorio
    and Gavana it should be (j+1). However the equations in those papers
    contain (j - 1) as well. However, I've got the right global minimum!!!
    """
    def __init__(self, dimensions=6):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
        self.global_optimum = [[-0.0158, 1.012, -0.2329, 1.260, -1.513,
                                0.9928]]
        self.fglob = 0.002288
    def fun(self, x, *args):
        self.nfev += 1
        # Column vector of the 30 sample points; a = i / 29 broadcasts
        # against the row vectors j (length 5) and k (length 6) below.
        i = atleast_2d(arange(30.)).T
        a = i / 29.
        j = arange(5.)
        k = arange(6.)
        # t1: inner derivative-like sum over x[1:] -> shape (30,).
        t1 = sum((j + 1) * a ** j * x[1:], axis=1)
        # t2: polynomial value sum over all of x -> shape (30,).
        t2 = sum(a ** k * x, axis=1)
        inner = (t1 - t2 ** 2 - 1) ** 2
        return sum(inner) + x[0] ** 2
class Wavy(Benchmark):
    r"""
    W / Wavy objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Wavy}}(x) = 1 - \frac{1}{n} \sum_{i=1}^{n}
        \cos(k x_i) e^{-\frac{x_i^2}{2}}

    with :math:`k = 10` here, and :math:`x_i \in [-\pi, \pi]`.  The number
    of local minima is :math:`kn` (odd :math:`k`) or :math:`(k + 1)n`
    (even :math:`k`).

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-pi] * self.N, [pi] * self.N))
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # Cosine ripples (k = 10) attenuated by a Gaussian envelope.
        ripples = cos(10 * x) * exp(-x ** 2.0 / 2.0)
        return 1.0 - (1.0 / self.N) * sum(ripples)
class WayburnSeader01(Benchmark):
    r"""
    Wayburn and Seader 1 objective function (two-dimensional, unimodal).

    .. math::

        f_{\text{WayburnSeader01}}(x) = (x_1^6 + x_2^4 - 17)^2
        + (2x_1 + x_2 - 4)^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 2]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
        self.custom_bounds = ([-2, 2], [-2, 2])
        self.global_optimum = [[1.0, 2.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        curve = (x[0] ** 6 + x[1] ** 4 - 17) ** 2
        line = (2 * x[0] + x[1] - 4) ** 2
        return curve + line
class WayburnSeader02(Benchmark):
    r"""
    Wayburn and Seader 2 objective function (two-dimensional, unimodal).

    .. math::

        f_{\text{WayburnSeader02}}(x) = \left[ 1.613 - 4(x_1 - 0.3125)^2
        - 4(x_2 - 1.625)^2 \right]^2 + (x_2 - 1)^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0.2, 1]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-500.0] * self.N,
                                [500.0] * self.N))
        self.custom_bounds = ([-1, 2], [-1, 2])
        self.global_optimum = [[0.2, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        ring = (1.613 - 4 * (x[0] - 0.3125) ** 2
                - 4 * (x[1] - 1.625) ** 2) ** 2
        offset = (x[1] - 1) ** 2
        return ring + offset
class Weierstrass(Benchmark):
    r"""
    Weierstrass objective function.
    This class defines the Weierstrass [1]_ global optimization problem.
    This is a multimodal minimization problem defined as follows:
    .. math::
        f_{\text{Weierstrass}}(x) = \sum_{i=1}^{n} \left [
                                    \sum_{k=0}^{kmax} a^k \cos
                                    \left( 2 \pi b^k (x_i + 0.5) \right) - n
                                    \sum_{k=0}^{kmax} a^k \cos(\pi b^k) \right ]
    Where, in this exercise, :math:`kmax = 20`, :math:`a = 0.5` and
    :math:`b = 3`.
    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-0.5, 0.5]` for :math:`i = 1, ..., n`.
    *Global optimum*: :math:`f(x) = 4` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`
    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005
    TODO line 1591.
    TODO Jamil, Gavana have got it wrong. The second term is not supposed to
    be included in the outer sum. Mishra code has it right as does the
    reference referred to in Jamil#166.
    NOTE(review): the docstring's stated optimum (4) disagrees with the
    ``fglob = 0`` set below — confirm against the Mishra listing.
    """
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-0.5] * self.N, [0.5] * self.N))
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0
        self.change_dimensionality = True
    def fun(self, x, *args):
        self.nfev += 1
        kmax = 20
        a, b = 0.5, 3.0
        # Column vector k = 0..kmax so t1 broadcasts to shape
        # (kmax + 1, N): one geometric cosine series per coordinate.
        k = atleast_2d(arange(kmax + 1.)).T
        t1 = a ** k * cos(2 * pi * b ** k * (x + 0.5))
        # Scalar correction term, applied once (NOT inside the outer sum).
        t2 = self.N * sum(a ** k.T * cos(pi * b ** k.T))
        return sum(sum(t1, axis=0)) - t2
class Whitley(Benchmark):
    r"""
    Whitley objective function (multimodal, dimension-scalable).

    .. math::

        f_{\text{Whitley}}(x) = \sum_{i=1}^n \sum_{j=1}^n
        \left[\frac{(100(x_i^2 - x_j)^2 + (1 - x_j)^2)^2}{4000}
        - \cos(100(x_i^2 - x_j)^2 + (1 - x_j)^2) + 1 \right]

    with :math:`x_i \in [-10.24, 10.24]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO Jamil#167 has '+ 1' inside the cos term, when it should be
    outside it.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.24] * self.N,
                                [10.24] * self.N))
        self.custom_bounds = ([-1, 2], [-1, 2])
        self.global_optimum = [[1.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # Broadcast x against its own column form to enumerate all (i, j)
        # pairs in one (n, n) array.
        xi = x
        xj = atleast_2d(x).T
        arg = 100.0 * ((xi ** 2.0) - xj) + (1.0 - xj) ** 2.0
        terms = (arg ** 2.0 / 4000.0) - cos(arg) + 1.0
        return sum(sum(terms, axis=0))
class Wolfe(Benchmark):
    r"""
    Wolfe objective function (three-dimensional, multimodal).

    .. math::

        f_{\text{Wolfe}}(x) = \frac{4}{3}(x_1^2 + x_2^2 - x_1 x_2)^{0.75}
        + x_3

    with :math:`x_i \in [0, 2]` for :math:`i = 1, 2, 3`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0] * self.N, [2.0] * self.N))
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # x3 enters only linearly; the first two variables are coupled.
        coupled = (x[0] ** 2 + x[1] ** 2 - x[0] * x[1]) ** 0.75
        return 4 / 3 * coupled + x[2]
| 9,789
| 29.216049
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py
|
# -*- coding: utf-8 -*-
from numpy import cos, exp, log, pi, sin, sqrt
from .go_benchmark import Benchmark, safe_import
with safe_import():
try:
from scipy.special import factorial # new
except ImportError:
from scipy.misc import factorial # old
#-----------------------------------------------------------------------
# UNIVARIATE SINGLE-OBJECTIVE PROBLEMS
#-----------------------------------------------------------------------
class Problem02(Benchmark):
    """
    Univariate Problem02 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem02}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right)

    Bound constraints: :math:`x \\in [2.7, 7.5]`.

    *Global optimum*: :math:`f(x)=-1.899599` at :math:`x = 5.145735`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(2.7, 7.5)]

        self.fglob = -1.899599
        self.global_optimum = 5.145735

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return sin(t) + sin(10.0 / 3.0 * t)
class Problem03(Benchmark):
    """
    Univariate Problem03 objective function.

    This class defines the Univariate Problem03 global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

         f_{\\text{Problem03}}(x) = - \\sum_{k=1}^5 k \\sin[(k+1)x+k]

    Bound constraints: :math:`x \\in [-10, 10]`

    .. figure:: figures/Problem03.png
        :alt: Univariate Problem03 function
        :align: center

        **Univariate Problem03 function**

    *Global optimum*: :math:`f(x)=-12.03124` for :math:`x = -6.7745761`
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10, 10)]

        self.global_optimum = -6.7745761
        self.fglob = -12.03124

    def fun(self, x, *args):
        self.nfev += 1
        x = x[0]

        # The sum runs over k = 1..5 (range(1, 6)); the certified optimum
        # -12.03124 at x = -6.7745761 matches this 5-term sum, so the old
        # docstring's upper limit of 6 was wrong, not the code.
        y = 0.0
        for k in range(1, 6):
            y += k * sin((k + 1) * x + k)

        return -y
class Problem04(Benchmark):
    """
    Univariate Problem04 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem04}}(x) = - \\left(16x^2 - 24x + 5 \\right) e^{-x}

    Bound constraints: :math:`x \\in [1.9, 3.9]`.

    *Global optimum*: :math:`f(x)=-3.85045` at :math:`x = 2.868034`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(1.9, 3.9)]

        self.fglob = -3.85045
        self.global_optimum = 2.868034

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        quadratic = 16 * t ** 2 - 24 * t + 5
        return -quadratic * exp(-t)
class Problem05(Benchmark):
    """
    Univariate Problem05 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem05}}(x) = - \\left(1.4 - 3x \\right) \\sin(18x)

    Bound constraints: :math:`x \\in [0, 1.2]`.

    *Global optimum*: :math:`f(x)=-1.48907` at :math:`x = 0.96609`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 1.2)]

        self.fglob = -1.48907
        self.global_optimum = 0.96609

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return -(1.4 - 3 * t) * sin(18.0 * t)
class Problem06(Benchmark):
    """
    Univariate Problem06 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem06}}(x) = - \\left[x + \\sin(x) \\right] e^{-x^2}

    Bound constraints: :math:`x \\in [-10, 10]`.

    *Global optimum*: :math:`f(x)=-0.824239` at :math:`x = 0.67956`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)]

        self.fglob = -0.824239
        self.global_optimum = 0.67956

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return -(t + sin(t)) * exp(-t ** 2.0)
class Problem07(Benchmark):
    """
    Univariate Problem07 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) + \\log(x) - 0.84x + 3

    Bound constraints: :math:`x \\in [2.7, 7.5]`.

    *Global optimum*: :math:`f(x)=-1.6013` at :math:`x = 5.19978`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(2.7, 7.5)]

        self.fglob = -1.6013
        self.global_optimum = 5.19978

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return sin(t) + sin(10.0 / 3.0 * t) + log(t) - 0.84 * t + 3
class Problem08(Benchmark):
    """
    Univariate Problem08 objective function.

    This class defines the Univariate Problem08 global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

         f_{\\text{Problem08}}(x) = - \\sum_{k=1}^5 k \\cos[(k+1)x+k]

    Bound constraints: :math:`x \\in [-10, 10]`

    .. figure:: figures/Problem08.png
        :alt: Univariate Problem08 function
        :align: center

        **Univariate Problem08 function**

    *Global optimum*: :math:`f(x)=-14.508` for :math:`x = -7.083506`
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10, 10)]

        self.global_optimum = -7.083506
        self.fglob = -14.508

    def fun(self, x, *args):
        self.nfev += 1
        x = x[0]

        # The sum runs over k = 1..5 (range(1, 6)); the certified optimum
        # -14.508 at x = -7.083506 matches this 5-term sum, so the old
        # docstring's upper limit of 6 was wrong, not the code.
        y = 0.0
        for k in range(1, 6):
            y += k * cos((k + 1) * x + k)

        return -y
class Problem09(Benchmark):
    """
    Univariate Problem09 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem09}}(x) = \\sin(x) + \\sin \\left(\\frac{2}{3} x \\right)

    Bound constraints: :math:`x \\in [3.1, 20.4]`.

    *Global optimum*: :math:`f(x)=-1.90596` at :math:`x = 17.039`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(3.1, 20.4)]

        self.fglob = -1.90596
        self.global_optimum = 17.039

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return sin(t) + sin(2.0 / 3.0 * t)
class Problem10(Benchmark):
    """
    Univariate Problem10 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem10}}(x) = -x\\sin(x)

    Bound constraints: :math:`x \\in [0, 10]`.

    *Global optimum*: :math:`f(x)=-7.916727` at :math:`x = 7.9787`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0, 10)]

        self.fglob = -7.916727
        self.global_optimum = 7.9787

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return -t * sin(t)
class Problem11(Benchmark):
    """
    Univariate Problem11 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem11}}(x) = 2\\cos(x) + \\cos(2x)

    Bound constraints: :math:`x \\in [-\\pi/2, 2\\pi]`.

    *Global optimum*: :math:`f(x)=-1.5` at :math:`x = 2.09439`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-pi / 2, 2 * pi)]

        self.fglob = -1.5
        self.global_optimum = 2.09439

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return 2 * cos(t) + cos(2 * t)
class Problem12(Benchmark):
    """
    Univariate Problem12 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem12}}(x) = \\sin^3(x) + \\cos^3(x)

    Bound constraints: :math:`x \\in [0, 2\\pi]`.

    *Global optimum*: :math:`f(x)=-1` at :math:`x = \\pi`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0, 2 * pi)]

        self.fglob = -1
        self.global_optimum = pi

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return sin(t) ** 3.0 + cos(t) ** 3.0
class Problem13(Benchmark):
    """
    Univariate Problem13 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem13}}(x) = -x^{2/3} - (1 - x^2)^{1/3}

    Bound constraints: :math:`x \\in [0.001, 0.99]`.

    *Global optimum*: :math:`f(x)=-1.5874` at :math:`x = 1/\\sqrt(2)`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.001, 0.99)]

        self.fglob = -1.5874
        self.global_optimum = 1.0 / sqrt(2)

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return -t ** (2.0 / 3.0) - (1.0 - t ** 2) ** (1.0 / 3.0)
class Problem14(Benchmark):
    """
    Univariate Problem14 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem14}}(x) = -e^{-x} \\sin(2\\pi x)

    Bound constraints: :math:`x \\in [0, 4]`.

    *Global optimum*: :math:`f(x)=-0.788685` at :math:`x = 0.224885`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 4.0)]

        self.fglob = -0.788685
        self.global_optimum = 0.224885

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return -exp(-t) * sin(2.0 * pi * t)
class Problem15(Benchmark):
    """
    Univariate Problem15 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem15}}(x) = \\frac{x^{2} - 5 x + 6}{x^{2} + 1}

    Bound constraints: :math:`x \\in [-5, 5]`.

    *Global optimum*: :math:`f(x)=-0.03553` at :math:`x = 2.41422`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 5.0)]

        self.fglob = -0.03553
        self.global_optimum = 2.41422

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        # Written as the negated numerator; algebraically (t^2 - 5t + 6).
        return -(-t ** 2.0 + 5 * t - 6) / (t ** 2 + 1)
class Problem18(Benchmark):
    """
    Univariate Problem18 objective function.

    A piecewise multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem18}}(x) = \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x \\leq 3 \\\\
         2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}

    Bound constraints: :math:`x \\in [0, 6]`.

    *Global optimum*: :math:`f(x)=0` at :math:`x = 2`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 6.0)]

        self.fglob = 0
        self.global_optimum = 2

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        # Logarithmic branch beyond the breakpoint, quadratic bowl below it.
        if t > 3:
            return 2 * log(t - 2.0) + 1
        return (t - 2.0) ** 2.0
class Problem20(Benchmark):
    """
    Univariate Problem20 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem20}}(x) = -[x-\\sin(x)]e^{-x^2}

    Bound constraints: :math:`x \\in [-10, 10]`.

    *Global optimum*: :math:`f(x)=-0.0634905` at :math:`x = 1.195137`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10, 10)]

        self.fglob = -0.0634905
        self.global_optimum = 1.195137

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return -(t - sin(t)) * exp(-t ** 2.0)
class Problem21(Benchmark):
    """
    Univariate Problem21 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem21}}(x) = x \\sin(x) + x \\cos(2x)

    Bound constraints: :math:`x \\in [0, 10]`.

    *Global optimum*: :math:`f(x)=-9.50835` at :math:`x = 4.79507`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0, 10)]

        self.fglob = -9.50835
        self.global_optimum = 4.79507

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return t * sin(t) + t * cos(2.0 * t)
class Problem22(Benchmark):
    """
    Univariate Problem22 objective function.

    A multimodal one-dimensional minimization problem:

    .. math::

         f_{\\text{Problem22}}(x) = e^{-3x} - \\sin^3(x)

    Bound constraints: :math:`x \\in [0, 20]`.

    *Global optimum*: :math:`f(x)=e^{-27\\pi/2} - 1` at :math:`x = 9\\pi/2`.
    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0, 20)]

        self.fglob = exp(-27.0 * pi / 2.0) - 1.0
        self.global_optimum = 9.0 * pi / 2.0

    def fun(self, x, *args):
        self.nfev += 1

        t = x[0]
        return exp(-3.0 * t) - sin(t) ** 3.0
| 16,917
| 22.175342
| 104
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_R.py
|
# -*- coding: utf-8 -*-
from numpy import abs, sum, sin, cos, asarray, arange, pi, exp, log, sqrt
from scipy.optimize import rosen
from .go_benchmark import Benchmark
class Rana(Benchmark):
    r"""
    Rana objective function.

    This class defines the Rana [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Rana}}(x) = \sum_{i=1}^{n} \left[x_{i}
        \sin\left(\sqrt{\lvert{x_{1} - x_{i} + 1}\rvert}\right)
        \cos\left(\sqrt{\lvert{x_{1} + x_{i} + 1}\rvert}\right) +
        \left(x_{1} + 1\right) \sin\left(\sqrt{\lvert{x_{1} + x_{i} +
        1}\rvert}\right) \cos\left(\sqrt{\lvert{x_{1} - x_{i} +
        1}\rvert}\right)\right]

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [-500.0, 500.0]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x_i) = -928.5478` for
    :math:`x = [-300.3376, 500]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: homemade global minimum here.

    NOTE(review): the docstring reports f = -928.5478 at [-300.3376, 500]
    but ``fglob`` below is -500.8021602966615, and the implementation pairs
    consecutive coordinates (x_i, x_{i+1}) rather than (x_1, x_i) as in the
    formula above -- confirm against the reference.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Bounds are widened by 1e-6 (presumably to keep the optimum at the
        # boundary strictly feasible -- TODO confirm).
        self._bounds = list(zip([-500.000001] * self.N,
                           [500.000001] * self.N))

        self.global_optimum = [[-300.3376, 500.]]
        self.fglob = -500.8021602966615
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Vectorized over consecutive coordinate pairs (x[i], x[i+1]).
        t1 = sqrt(abs(x[1:] + x[: -1] + 1))
        t2 = sqrt(abs(x[1:] - x[: -1] + 1))
        v = (x[1:] + 1) * cos(t2) * sin(t1) + x[:-1] * cos(t1) * sin(t2)
        return sum(v)
class Rastrigin(Benchmark):
    r"""
    Rastrigin objective function.

    This class defines the Rastrigin [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Rastrigin}}(x) = 10n + \sum_{i=1}^n \left[ x_i^2
        - 10 \cos(2\pi x_i) \right]

    (The :math:`10n` term is *added* to the sum, as in the standard
    definition and the implementation below; it is not a factor.)

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # 10n offset plus the sum over all coordinates.
        return 10.0 * self.N + sum(x ** 2.0 - 10.0 * cos(2.0 * pi * x))
class Ratkowsky01(Benchmark):
    """
    Ratkowsky objective function.

    Nonlinear least-squares problem: the objective is the residual sum of
    squares of the 4-parameter growth model

        y = b1 / (1 + exp(b2 - b3 * x)) ** (1 / b4)

    fitted to the 15 observations ``self.a`` at abscissae ``self.b`` = 1..15.
    ``fglob`` is the certified minimum residual sum of squares from NIST.

    .. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky3.shtml
    """

    # TODO, this is a NIST regression standard dataset

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        # Per-parameter search box for (b1, b2, b3, b4).
        self._bounds = list(zip([0., 1., 0., 0.1],
                                [1000, 20., 3., 6.]))

        # NIST certified parameter values and minimum residual.
        self.global_optimum = [[6.996415127e2, 5.2771253025, 7.5962938329e-1,
                                1.2792483859]]
        self.fglob = 8.786404908e3

        # Observed responses (y) and predictor values (x = 1..15).
        self.a = asarray([16.08, 33.83, 65.80, 97.20, 191.55, 326.20, 386.87,
                          520.53, 590.03, 651.92, 724.93, 699.56, 689.96,
                          637.56, 717.41])
        self.b = arange(1, 16.)

    def fun(self, x, *args):
        self.nfev += 1

        # Model predictions for every observation, then squared residuals.
        vec = x[0] / ((1 + exp(x[1] - x[2] * self.b)) ** (1 / x[3]))
        return sum((self.a - vec) ** 2)
class Ratkowsky02(Benchmark):
    r"""
    Ratkowsky02 objective function.

    This is the Ratkowsky 2 [1]_ nonlinear least-squares problem: the
    residual sum of squares of the 3-parameter logistic model

    .. math::

        f_{\text{Ratkowsky02}}(x) = \sum_{m=1}^{9}(a_m - x[0] / (1 + exp(x[1]
        - b_m x[2]))^2

    over the 9 observations

    .. math::

        \begin{cases}
        a=[8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62, 67.08]\\
        b=[9., 14., 21., 28., 42., 57., 63., 70., 79.]\\
        \end{cases}

    Here :math:`x_1 \in [1, 100]`, :math:`x_2 \in [0.1, 5]` and
    :math:`x_3 \in [0.01, 0.5]`.

    *Global optimum*: :math:`f(x) = 8.0565229338` at
    :math:`x = [7.2462237576e1, 2.6180768402, 6.7359200066e-2]`.

    .. [1] https://www.itl.nist.gov/div898/strd/nls/data/ratkowsky2.shtml
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([10, 0.5, 0.01],
                                [200, 5., 0.5]))

        self.fglob = 8.0565229338
        self.global_optimum = [[7.2462237576e1, 2.6180768402, 6.7359200066e-2]]

        # Observations (a) and predictor values (b).
        self.a = asarray([8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62,
                          67.08])
        self.b = asarray([9., 14., 21., 28., 42., 57., 63., 70., 79.])

    def fun(self, x, *args):
        self.nfev += 1

        # Logistic model prediction for every observation point.
        model = x[0] / (1 + exp(x[1] - x[2] * self.b))
        return sum((self.a - model) ** 2)
class Ripple01(Benchmark):
    r"""
    Ripple 1 objective function.

    A multimodal two-dimensional minimization problem [1]_:

    .. math::

        f_{\text{Ripple01}}(x) = \sum_{i=1}^2 -e^{-2 \log 2
        (\frac{x_i-0.1}{0.8})^2} \left[\sin^6(5 \pi x_i)
        + 0.1\cos^2(500 \pi x_i) \right]

    with :math:`x_i \in [0, 1]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -2.2` at :math:`x_i = 0.1` for
    :math:`i = 1, 2`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))

        self.fglob = -2.2
        self.global_optimum = [[0.1 for _ in range(self.N)]]

    def fun(self, x, *args):
        self.nfev += 1

        # Gaussian-like envelope centred at 0.1, modulating two oscillations.
        envelope = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0
        ripples = sin(5.0 * pi * x) ** 6.0 + 0.1 * cos(500.0 * pi * x) ** 2.0
        return sum(-exp(envelope) * ripples)
class Ripple25(Benchmark):
    r"""
    Ripple 25 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Ripple25}}(x) = \sum_{i=1}^2 -e^{-2
        \log 2 (\frac{x_i-0.1}{0.8})^2}
        \left[\sin^6(5 \pi x_i) \right]

    with :math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -2` at :math:`x_i = 0.1` for
    :math:`i = 1, ..., n`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))

        self.fglob = -2.0
        self.global_optimum = [[0.1 for _ in range(self.N)]]

    def fun(self, x, *args):
        self.nfev += 1

        # Same envelope as Ripple01, but without the high-frequency term.
        envelope = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0
        ripples = sin(5.0 * pi * x) ** 6.0
        return sum(-exp(envelope) * ripples)
class Rosenbrock(Benchmark):
    r"""
    Rosenbrock objective function.

    The classic Rosenbrock "banana" function [1]_, a multimodal
    minimization problem:

    .. math::

        f_{\text{Rosenbrock}}(x) = \sum_{i=1}^{n-1} [100(x_i^2
        - x_{i+1})^2 + (x_i - 1)^2]

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 1` for
    :math:`i = 1, ..., n`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-30.] * self.N, [30.0] * self.N))
        self.custom_bounds = [(-2, 2), (-2, 2)]

        self.fglob = 0.0
        self.global_optimum = [[1 for _ in range(self.N)]]
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Delegate to scipy's reference implementation.
        return rosen(x)
class RosenbrockModified(Benchmark):
    r"""
    Modified Rosenbrock objective function.

    This class defines the Modified Rosenbrock [1]_ global optimization
    problem, a multimodal minimization problem:

    .. math::

        f_{\text{RosenbrockModified}}(x) = 74 + 100(x_2 - x_1^2)^2
        + (1 - x_1)^2 - 400 e^{-\frac{(x_1+1)^2 + (x_2 + 1)^2}{0.1}}

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-2, 2]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 34.04024310` at
    :math:`x = [-0.90955374, -0.95057172]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: We have different global minimum compared to Jamil #106. This is
    possibly because of the (1-x) term is using the wrong parameter.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-2.0] * self.N, [2.0] * self.N))
        self.custom_bounds = ([-1.0, 0.5], [-1.0, 1.0])

        self.fglob = 34.040243106640844
        self.global_optimum = [[-0.90955374, -0.95057172]]

    def fun(self, x, *args):
        self.nfev += 1

        # Shifted Rosenbrock bowl minus a deep Gaussian well near (-1, -1).
        base = 74 + 100. * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
        well = 400 * exp(-((x[0] + 1.) ** 2 + (x[1] + 1.) ** 2) / 0.1)
        return base - well
class RotatedEllipse01(Benchmark):
    r"""
    Rotated Ellipse 1 objective function.

    A unimodal two-dimensional minimization problem [1]_:

    .. math::

        f_{\text{RotatedEllipse01}}(x) = 7x_1^2 - 6 \sqrt{3} x_1x_2 + 13x_2^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-500.0] * self.N,
                                [500.0] * self.N))
        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])

        self.fglob = 0.0
        self.global_optimum = [[0.0, 0.0]]

    def fun(self, x, *args):
        self.nfev += 1

        u, v = x[0], x[1]
        return (7.0 * u ** 2.0 - 6.0 * sqrt(3) * u * v
                + 13 * v ** 2.0)
class RotatedEllipse02(Benchmark):
    r"""
    Rotated Ellipse 2 objective function.

    A unimodal two-dimensional minimization problem [1]_:

    .. math::

        f_{\text{RotatedEllipse02}}(x) = x_1^2 - x_1 x_2 + x_2^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-500.0] * self.N,
                                [500.0] * self.N))
        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])

        self.fglob = 0.0
        self.global_optimum = [[0.0, 0.0]]

    def fun(self, x, *args):
        self.nfev += 1

        u, v = x[0], x[1]
        return u ** 2.0 - u * v + v ** 2.0
| 12,436
| 29.408313
| 85
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py
|
# -*- coding: utf-8 -*-
from numpy import (abs, asarray, cos, floor, arange, pi, prod, roll, sin,
sqrt, sum, repeat, atleast_2d, tril)
from numpy.random import uniform
from .go_benchmark import Benchmark
class Salomon(Benchmark):
    r"""
    Salomon objective function.

    This class defines the Salomon [1]_ global optimization problem, a
    multimodal minimization problem:

    .. math::

        f_{\text{Salomon}}(x) = 1 - \cos \left (2 \pi
        \sqrt{\sum_{i=1}^{n} x_i^2} \right) + 0.1 \sqrt{\sum_{i=1}^n x_i^2}

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-50, 50), (-50, 50)]

        self.fglob = 0.0
        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # The objective depends on x only through its Euclidean norm.
        radius = sqrt(sum(x ** 2))
        return 1 - cos(2 * pi * radius) + 0.1 * radius
class Sargan(Benchmark):
    r"""
    Sargan objective function.

    This class defines the Sargan [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Sargan}}(x) = \sum_{i=1}^{n} n \left (x_i^2
        + 0.4 \sum_{i \neq j}^{n} x_ix_j \right)

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-100, 100]` for
    :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    NOTE(review): the implementation's cross term uses only products of
    consecutive coordinates (via ``roll``), not all pairs :math:`i \neq j`
    as written above -- confirm against the reference. Both forms vanish at
    the origin, consistent with ``fglob = 0``.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-5, 5), (-5, 5)]

        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # x0/x1: consecutive coordinate pairs (x_i, x_{i+1}); the inner sum
        # is a scalar added to every x_i ** 2 before the outer sum.
        x0 = x[:-1]
        x1 = roll(x, -1)[:-1]

        return sum(self.N * (x ** 2 + 0.4 * sum(x0 * x1)))
class Schaffer01(Benchmark):
    r"""
    Schaffer 1 objective function.

    This class defines the Schaffer 1 [1]_ global optimization problem, a
    multimodal minimization problem:

    .. math::

        f_{\text{Schaffer01}}(x) = 0.5 + \frac{\sin^2 (x_1^2 + x_2^2)^2 - 0.5}
        {1 + 0.001(x_1^2 + x_2^2)^2}

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`.

    .. [1] Mishra, S. Some new test functions for global optimization and
    performance of repulsive particle swarm method.
    Munich Personal RePEc Archive, 2006, 2718
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-10, 10), (-10, 10)]

        self.fglob = 0.0
        self.global_optimum = [[0.0 for _ in range(self.N)]]

    def fun(self, x, *args):
        self.nfev += 1

        r2 = (x[0] ** 2 + x[1] ** 2)
        numer = sin(r2) ** 2 - 0.5
        denom = (1 + 0.001 * r2) ** 2
        return 0.5 + numer / denom
class Schaffer02(Benchmark):
    r"""
    Schaffer 2 objective function.

    This class defines the Schaffer 2 [1]_ global optimization problem, a
    multimodal minimization problem:

    .. math::

        f_{\text{Schaffer02}}(x) = 0.5 + \frac{\sin^2 (x_1^2 - x_2^2)^2 - 0.5}
        {1 + 0.001(x_1^2 + x_2^2)^2}

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`.

    .. [1] Mishra, S. Some new test functions for global optimization and
    performance of repulsive particle swarm method.
    Munich Personal RePEc Archive, 2006, 2718
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-10, 10), (-10, 10)]

        self.fglob = 0.0
        self.global_optimum = [[0.0 for _ in range(self.N)]]

    def fun(self, x, *args):
        self.nfev += 1

        u, v = x[0] ** 2, x[1] ** 2
        numer = sin((u - v)) ** 2 - 0.5
        denom = (1 + 0.001 * (u + v)) ** 2
        return 0.5 + numer / denom
class Schaffer03(Benchmark):
    r"""
    Schaffer 3 objective function.

    This class defines the Schaffer 3 [1]_ global optimization problem, a
    multimodal minimization problem:

    .. math::

        f_{\text{Schaffer03}}(x) = 0.5 + \frac{\sin^2 \left( \cos \lvert x_1^2
        - x_2^2 \rvert \right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.00156685` at :math:`x = [0, 1.253115]`.

    .. [1] Mishra, S. Some new test functions for global optimization and
    performance of repulsive particle swarm method.
    Munich Personal RePEc Archive, 2006, 2718
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-10, 10), (-10, 10)]

        self.fglob = 0.00156685
        self.global_optimum = [[0.0, 1.253115]]

    def fun(self, x, *args):
        self.nfev += 1

        u, v = x[0] ** 2, x[1] ** 2
        numer = sin(cos(abs(u - v))) ** 2 - 0.5
        denom = (1 + 0.001 * (u + v)) ** 2
        return 0.5 + numer / denom
class Schaffer04(Benchmark):
    r"""
    Schaffer 4 objective function.

    This class defines the Schaffer 4 [1]_ global optimization problem, a
    multimodal minimization problem:

    .. math::

        f_{\text{Schaffer04}}(x) = 0.5 + \frac{\cos^2 \left( \sin(x_1^2 - x_2^2)
        \right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}^2

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.292579` at :math:`x = [0, 1.253115]`.

    .. [1] Mishra, S. Some new test functions for global optimization and
    performance of repulsive particle swarm method.
    Munich Personal RePEc Archive, 2006, 2718
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                                [100.0] * self.N))
        self.custom_bounds = [(-10, 10), (-10, 10)]

        self.fglob = 0.292579
        self.global_optimum = [[0.0, 1.253115]]

    def fun(self, x, *args):
        self.nfev += 1

        u, v = x[0] ** 2, x[1] ** 2
        numer = cos(sin(abs(u - v))) ** 2 - 0.5
        denom = (1 + 0.001 * (u + v)) ** 2
        return 0.5 + numer / denom
# class SchmidtVetters(Benchmark):
#
# r"""
# Schmidt-Vetters objective function.
#
# This class defines the Schmidt-Vetters global optimization problem. This
# is a multimodal minimization problem defined as follows:
#
# .. math::
#
# f_{\text{SchmidtVetters}}(x) = \frac{1}{1 + (x_1 - x_2)^2}
# + \sin \left(\frac{\pi x_2 + x_3}{2} \right)
# + e^{\left(\frac{x_1+x_2}{x_2} - 2\right)^2}
#
# with :math:`x_i \in [0, 10]` for :math:`i = 1, 2, 3`.
#
# *Global optimum*: :math:`f(x) = 2.99643266` for
# :math:`x = [0.79876108, 0.79962581, 0.79848824]`
#
# TODO equation seems right, but [7.07083412 , 10., 3.14159293] produces a
# lower minimum, 0.193973
# """
#
# def __init__(self, dimensions=3):
# Benchmark.__init__(self, dimensions)
# self._bounds = zip([0.0] * self.N, [10.0] * self.N)
#
# self.global_optimum = [[0.79876108, 0.79962581, 0.79848824]]
# self.fglob = 2.99643266
#
# def fun(self, x, *args):
# self.nfev += 1
#
# return (1 / (1 + (x[0] - x[1]) ** 2) + sin((pi * x[1] + x[2]) / 2)
# + exp(((x[0] + x[1]) / x[1] - 2) ** 2))
class Schwefel01(Benchmark):
    r"""
    Schwefel 1 objective function.

    A unimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel01}}(x) = \left(\sum_{i=1}^n x_i^2 \right)^{\alpha}

    where :math:`\alpha = \sqrt{\pi}`, :math:`n` is the number of dimensions
    and :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])

        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Exponent alpha = sqrt(pi) as specified by the reference.
        return sum(x ** 2.0) ** sqrt(pi)
class Schwefel02(Benchmark):
    r"""
    Schwefel 2 objective function.

    A unimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel02}}(x) = \sum_{i=1}^n \left(\sum_{j=1}^i
        x_i \right)^2

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])

        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Tile x into an N x N matrix; the lower triangle of row i holds
        # x_1..x_i, so its row sums are the cumulative partial sums.
        tiled = repeat(atleast_2d(x), self.N, axis=0)
        partial_sums = sum(tril(tiled), axis=1)
        return sum(partial_sums ** 2)
class Schwefel04(Benchmark):
    r"""
    Schwefel 4 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel04}}(x) = \sum_{i=1}^n \left[(x_i - 1)^2
        + (x_1 - x_i^2)^2 \right]

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [0, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 1` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(0.0, 10.0)] * self.N
        self.custom_bounds = ([0.0, 2.0], [0.0, 2.0])

        self.global_optimum = [[1.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Distance-from-one term plus a coupling of every x_i back to x_1.
        shift = (x - 1.0) ** 2.0
        coupling = (x[0] - x ** 2.0) ** 2.0
        return sum(shift + coupling)
class Schwefel06(Benchmark):
    r"""
    Schwefel 6 objective function.

    A two-dimensional unimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel06}}(x) = \max(\lvert x_1 + 2x_2 - 7 \rvert,
                                        \lvert 2x_1 + x_2 - 5 \rvert)

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 3]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])

        self.global_optimum = [[1.0, 3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # Residuals of the two linear equations; both vanish at (1, 3).
        first = abs(x[0] + 2 * x[1] - 7)
        second = abs(2 * x[0] + x[1] - 5)
        return max(first, second)
class Schwefel20(Benchmark):
    r"""
    Schwefel 20 objective function.

    A unimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel20}}(x) = \sum_{i=1}^n \lvert x_i \rvert

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: Jamil #122 is incorrect. There shouldn't be a leading minus sign.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N

        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        return abs(x).sum()
class Schwefel21(Benchmark):
    r"""
    Schwefel 21 objective function.

    A unimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel21}}(x) = \smash{\displaystyle\max_{1 \leq i \leq n}}
                                   \lvert x_i \rvert

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N

        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Largest absolute component (infinity norm of x).
        return abs(x).max()
class Schwefel22(Benchmark):
    r"""
    Schwefel 22 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel22}}(x) = \sum_{i=1}^n \lvert x_i \rvert
                                  + \prod_{i=1}^n \lvert x_i \rvert

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])

        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Both terms reuse the same absolute-value vector.
        magnitudes = abs(x)
        return sum(magnitudes) + prod(magnitudes)
class Schwefel26(Benchmark):
    r"""
    Schwefel 26 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel26}}(x) = 418.9829n - \sum_{i=1}^n x_i
                                   \sin(\sqrt{|x_i|})

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-500, 500]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 420.968746` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-500.0, 500.0)] * self.N

        self.global_optimum = [[420.968746] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # The offset per dimension makes the minimum value zero.
        return 418.982887 * self.N - sum(x * sin(sqrt(abs(x))))
class Schwefel36(Benchmark):
    r"""
    Schwefel 36 objective function.

    A two-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{Schwefel36}}(x) = -x_1x_2(72 - 2x_1 - 2x_2)

    with :math:`x_i \in [0, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -3456` at :math:`x = [12, 12]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(0.0, 500.0)] * self.N
        self.custom_bounds = ([0.0, 20.0], [0.0, 20.0])

        self.global_optimum = [[12.0, 12.0]]
        self.fglob = -3456.0

    def fun(self, x, *args):
        self.nfev += 1

        slack = 72 - 2 * x[0] - 2 * x[1]
        return -x[0] * x[1] * slack
class Shekel05(Benchmark):
    r"""
    Shekel 5 objective function.

    A four-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{Shekel05}}(x) = \sum_{i=1}^{m} \frac{1}{c_{i}
        + \sum_{j=1}^{n} (x_{j} - a_{ij})^2 }

    where :math:`m = 5` and :math:`a`, :math:`c` are the matrices stored in
    ``self.A`` and ``self.C`` below, with :math:`x_i \in [0, 10]` for
    :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = -10.1531996791` near :math:`x_i = 4` for
    :math:`i = 1, ..., 4`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: this is a different global minimum compared to Jamil#130. The
    minimum is found by doing lots of optimisations. The solution is supposed
    to be at [4] * N, is there any numerical overflow?
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(0.0, 10.0)] * self.N

        self.global_optimum = [[4.00003715092,
                                4.00013327435,
                                4.00003714871,
                                4.0001332742]]
        self.fglob = -10.1531996791
        self.A = asarray([[4.0, 4.0, 4.0, 4.0],
                          [1.0, 1.0, 1.0, 1.0],
                          [8.0, 8.0, 8.0, 8.0],
                          [6.0, 6.0, 6.0, 6.0],
                          [3.0, 7.0, 3.0, 7.0]])
        self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4])

    def fun(self, x, *args):
        self.nfev += 1

        # Squared distance from x to each row (well centre) of A.
        dist_sq = sum((x - self.A) ** 2, axis=1)
        return -sum(1 / (dist_sq + self.C))
class Shekel07(Benchmark):
    r"""
    Shekel 7 objective function.

    A four-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{Shekel07}}(x) = \sum_{i=1}^{m} \frac{1}{c_{i}
        + \sum_{j=1}^{n} (x_{j} - a_{ij})^2 }

    where :math:`m = 7` and :math:`a`, :math:`c` are the matrices stored in
    ``self.A`` and ``self.C`` below, with :math:`x_i \in [0, 10]` for
    :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = -10.4029405668` near :math:`x_i = 4` for
    :math:`i = 1, ..., 4`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: this is a different global minimum compared to Jamil#131. This
    minimum is obtained after running lots of minimisations! Is there any
    numerical overflow that causes the minimum solution to not be [4] * N?
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(0.0, 10.0)] * self.N

        self.global_optimum = [[4.00057291078,
                                4.0006893679,
                                3.99948971076,
                                3.99960615785]]
        self.fglob = -10.4029405668
        self.A = asarray([[4.0, 4.0, 4.0, 4.0],
                          [1.0, 1.0, 1.0, 1.0],
                          [8.0, 8.0, 8.0, 8.0],
                          [6.0, 6.0, 6.0, 6.0],
                          [3.0, 7.0, 3.0, 7.0],
                          [2.0, 9.0, 2.0, 9.0],
                          [5.0, 5.0, 3.0, 3.0]])
        self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3])

    def fun(self, x, *args):
        self.nfev += 1

        # Squared distance from x to each row (well centre) of A.
        dist_sq = sum((x - self.A) ** 2, axis=1)
        return -sum(1 / (dist_sq + self.C))
class Shekel10(Benchmark):
    r"""
    Shekel 10 objective function.

    A four-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{Shekel10}}(x) = \sum_{i=1}^{m} \frac{1}{c_{i}
        + \sum_{j=1}^{n} (x_{j} - a_{ij})^2 }

    where :math:`m = 10` and :math:`a`, :math:`c` are the matrices stored in
    ``self.A`` and ``self.C`` below, with :math:`x_i \in [0, 10]` for
    :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = -10.536409816692023` near :math:`x_i = 4`
    for :math:`i = 1, ..., 4`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Found a lower global minimum than Jamil#132... Is this numerical overflow?
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(0.0, 10.0)] * self.N

        self.global_optimum = [[4.0007465377266271,
                                4.0005929234621407,
                                3.9996633941680968,
                                3.9995098017834123]]
        self.fglob = -10.536409816692023
        self.A = asarray([[4.0, 4.0, 4.0, 4.0],
                          [1.0, 1.0, 1.0, 1.0],
                          [8.0, 8.0, 8.0, 8.0],
                          [6.0, 6.0, 6.0, 6.0],
                          [3.0, 7.0, 3.0, 7.0],
                          [2.0, 9.0, 2.0, 9.0],
                          [5.0, 5.0, 3.0, 3.0],
                          [8.0, 1.0, 8.0, 1.0],
                          [6.0, 2.0, 6.0, 2.0],
                          [7.0, 3.6, 7.0, 3.6]])
        self.C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5])

    def fun(self, x, *args):
        self.nfev += 1

        # Squared distance from x to each row (well centre) of A.
        dist_sq = sum((x - self.A) ** 2, axis=1)
        return -sum(1 / (dist_sq + self.C))
class Shubert01(Benchmark):
    r"""
    Shubert 1 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Shubert01}}(x) = \prod_{i=1}^{n}\left(\sum_{j=1}^{5}
                                  j \cos \left[ (j+1)x_i + j \right] \right)

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -186.7309` at
    :math:`x = [-7.0835, 4.8580]` (and many others).

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: Jamil#133 is missing a prefactor of j before the cos function.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[-7.0835, 4.8580]]
        self.fglob = -186.7309
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Column vector j = 1..5 broadcasts against x; summing the rows gives
        # the inner series for each dimension, whose product is returned.
        j = atleast_2d(arange(1, 6)).T
        terms = j * cos((j + 1) * x + j)
        return prod(sum(terms, axis=0))
class Shubert03(Benchmark):
    r"""
    Shubert 3 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Shubert03}}(x) = \sum_{i=1}^n \sum_{j=1}^5 -j
                                  \sin((j+1)x_i + j)

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -24.062499` at
    :math:`x = [5.791794, 5.791794]` (and many others).

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: Jamil#134 has wrong global minimum value, and is missing a minus sign
    before the whole thing.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[5.791794, 5.791794]]
        self.fglob = -24.062499
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Column vector j = 1..5 broadcasts against x; sum over j then over i.
        j = atleast_2d(arange(1, 6)).T
        terms = -j * sin((j + 1) * x + j)
        return sum(sum(terms))
class Shubert04(Benchmark):
    r"""
    Shubert 4 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Shubert04}}(x) = \left(\sum_{i=1}^n \sum_{j=1}^5 -j
                                  \cos ((j+1)x_i + j)\right)

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -29.016015` at
    :math:`x = [-0.80032121, -7.08350592]` (and many others).

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: Jamil#135 has wrong global minimum value, and is missing a minus sign
    before the whole thing.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[-0.80032121, -7.08350592]]
        self.fglob = -29.016015
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Same structure as Shubert03 with cos instead of sin.
        j = atleast_2d(arange(1, 6)).T
        terms = -j * cos((j + 1) * x + j)
        return sum(sum(terms))
class SineEnvelope(Benchmark):
    r"""
    SineEnvelope objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{SineEnvelope}}(x) = -\sum_{i=1}^{n-1}\left[\frac{\sin^2(
                                     \sqrt{x_{i+1}^2+x_{i}^2}-0.5)}
                                     {(0.001(x_{i+1}^2+x_{i}^2)+1)^2}
                                     + 0.5\right]

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: Jamil #136
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = [(-20, 20), (-20, 20)]

        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Sum over consecutive pairs (x_i, x_{i+1}).
        head = x[:-1]
        tail = x[1:]
        pair_sq = head ** 2 + tail ** 2
        envelope = (sin(sqrt(pair_sq)) ** 2 - 0.5) / (1 + 0.001 * pair_sq) ** 2
        return sum(envelope + 0.5)
class SixHumpCamel(Benchmark):
    r"""
    Six Hump Camel objective function.

    A two-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{SixHumpCamel}}(x) = 4x_1^2+x_1x_2-4x_2^2-2.1x_1^4+
                                    4x_2^4+\frac{1}{3}x_1^6

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1.031628453489877` at
    :math:`x = [0.08984201368301331 , -0.7126564032704135]` or
    :math:`x = [-0.08984201368301331, 0.7126564032704135]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.0, 5.0)] * self.N
        self.custom_bounds = [(-2, 2), (-1.5, 1.5)]

        self.global_optimum = [(0.08984201368301331, -0.7126564032704135),
                               (-0.08984201368301331, 0.7126564032704135)]
        self.fglob = -1.031628

    def fun(self, x, *args):
        self.nfev += 1

        # Factored form: camel-back term in x1, coupling term, quartic in x2.
        camel = (4 - 2.1 * x[0] ** 2 + x[0] ** 4 / 3) * x[0] ** 2
        cross = x[0] * x[1]
        quartic = (4 * x[1] ** 2 - 4) * x[1] ** 2
        return camel + cross + quartic
class Sodp(Benchmark):
    r"""
    Sodp objective function.

    The Sum Of Different Powers [1]_ global optimization problem, a
    multimodal minimization problem:

    .. math::

        f_{\text{Sodp}}(x) = \sum_{i=1}^{n} \lvert{x_{i}}\rvert^{i + 1}

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-1.0, 1.0)] * self.N

        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Component i is raised to the power i + 1, i.e. exponents 2..N+1.
        exponents = arange(2, self.N + 2)
        return sum(abs(x) ** exponents)
class Sphere(Benchmark):
    r"""
    Sphere objective function.

    The classic Sphere [1]_ minimization problem:

    .. math::

        f_{\text{Sphere}}(x) = \sum_{i=1}^{n} x_i^2

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Jamil has stupid limits
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.12, 5.12)] * self.N

        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        return (x ** 2).sum()
class Step(Benchmark):
    r"""
    Step objective function.

    This class defines the Step [1]_ global optimization problem. This
    is a multimodal minimization problem. As implemented here it is:

    .. math::

        f_{\text{Step}}(x) = \sum_{i=1}^{n} \lfloor \lvert x_i
                             \rvert \rfloor

    NOTE(review): the reference formulation is
    :math:`\sum (\lfloor x_i + 0.5 \rfloor)^2` with optimum at
    :math:`x_i = 0.5`; the code below instead computes
    ``sum(floor(abs(x)))`` with optimum at :math:`x_i = 0` — confirm which
    variant is intended.

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
    :math:`i = 1, ..., n` (any :math:`\lvert x_i \rvert < 1` attains it).

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-100.0] * self.N,
                           [100.0] * self.N))
        self.custom_bounds = ([-5, 5], [-5, 5])

        self.global_optimum = [[0. for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Piecewise-constant surface: integer part of each |x_i|, summed.
        return sum(floor(abs(x)))
class Step2(Benchmark):
    r"""
    Step 2 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Step2}}(x) = \sum_{i=1}^{n} \left ( \lfloor x_i
                             \rfloor + 0.5 \right )^2

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0.5` at :math:`x_i = 0.5` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = ([-5, 5], [-5, 5])

        self.global_optimum = [[0.5] * self.N]
        self.fglob = 0.5
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        stepped = floor(x) + 0.5
        return sum(stepped ** 2.0)
class Stochastic(Benchmark):
    r"""
    Stochastic objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{Stochastic}}(x) = \sum_{i=1}^{n} \epsilon_i
                                    \left | {x_i - \frac{1}{i}} \right |

    The variable :math:`\epsilon_i, (i=1,...,n)` is a random variable
    uniformly distributed in :math:`[0, 1]`, drawn afresh on every
    evaluation (so the function is noisy).

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 1/i` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.0, 5.0)] * self.N

        self.global_optimum = [[1.0 / k for k in range(1, self.N + 1)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Fresh noise weights on every call; residuals are |x_i - 1/i|.
        noise = uniform(0.0, 1.0, size=(self.N, ))
        indices = arange(1, self.N + 1)
        return sum(noise * abs(x - 1.0 / indices))
class StretchedV(Benchmark):
    r"""
    StretchedV objective function.

    A multimodal minimization problem [1]_:

    .. math::

       f_{\text{StretchedV}}(x) = \sum_{i=1}^{n-1} t^{1/4}
                                  [\sin (50t^{0.1}) + 1]^2

    Where, in this exercise:

    .. math::

       t = x_{i+1}^2 + x_i^2

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0., 0.]` when
    :math:`n = 2`.

    .. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
    Unconstrained Global Optimization", 2005

    TODO All the sources disagree on the equation, in some the 1 is in the
    brackets, in others it is outside. In Jamil#142 it's not even 1. Here
    we go with the Adorio option.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10, 10)] * self.N

        self.global_optimum = [[0, 0]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # t is the squared norm of each consecutive coordinate pair.
        t = x[1:] ** 2 + x[:-1] ** 2
        oscillation = sin(50.0 * t ** 0.1 + 1) ** 2
        return sum(t ** 0.25 * oscillation)
class StyblinskiTang(Benchmark):
    r"""
    StyblinskiTang objective function.

    The Styblinski-Tang [1]_ multimodal minimization problem:

    .. math::

       f_{\text{StyblinskiTang}}(x) = \frac{1}{2} \sum_{i=1}^{n} \left(x_i^4
                                      - 16x_i^2 + 5x_i \right)

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -39.16616570377142n` at
    :math:`x_i = -2.903534018185960` for :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.0, 5.0)] * self.N

        self.global_optimum = [[-2.903534018185960] * self.N]
        self.fglob = -39.16616570377142 * self.N
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        polynomial = x ** 4 - 16 * x ** 2 + 5 * x
        return sum(polynomial) / 2
| 40,893
| 28.915143
| 83
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_N.py
|
# -*- coding: utf-8 -*-
from numpy import cos, sqrt, sin, abs
from .go_benchmark import Benchmark
class NeedleEye(Benchmark):
    r"""
    NeedleEye objective function.

    The Needle-Eye [1]_ multimodal minimization problem:

    .. math::

        f_{\text{NeedleEye}}(x) =
            \begin{cases}
            1 & \textrm{if }\hspace{5pt} \lvert x_i \rvert < eye \hspace{5pt}
            \forall i \\
            \sum_{i=1}^n (100 + \lvert x_i \rvert) & \textrm{if } \hspace{5pt}
            \lvert x_i \rvert > eye \\
            0 & \textrm{otherwise}\\
            \end{cases}

    where :math:`eye = 0.0001`, :math:`n` is the number of dimensions and
    :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 1` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[0.0] * self.N]
        self.fglob = 1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        eye = 0.0001
        total = 0.0
        outside = False

        for component in x:
            magnitude = abs(component)
            if magnitude >= eye:
                # Heavy penalty for any component outside the "eye".
                outside = True
                total += 100.0 + magnitude
            else:
                total += 1.0

        # When every component is inside the eye, average instead of sum so
        # the value is exactly 1 at the optimum.
        if not outside:
            total /= self.N

        return total
class NewFunction01(Benchmark):
    r"""
    NewFunction01 objective function.

    A two-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{NewFunction01}}(x) = \left | {\cos\left(\sqrt{\left|{x_{1}^{2}
       + x_{2}}\right|}\right)} \right |^{0.5} + (x_{1} + x_{2})/100

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.184648852475` at
    :math:`x = [-8.46668984648, -9.99980944557]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO line 355
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[-8.46668984648, -9.99980944557]]
        self.fglob = -0.184648852475

    def fun(self, x, *args):
        self.nfev += 1

        oscillatory = abs(cos(sqrt(abs(x[0] ** 2 + x[1])))) ** 0.5
        linear = 0.01 * (x[0] + x[1])
        return oscillatory + linear
class NewFunction02(Benchmark):
    r"""
    NewFunction02 objective function.

    A two-dimensional multimodal minimization problem [1]_:

    .. math::

        f_{\text{NewFunction02}}(x) = \left | {\sin\left(\sqrt{\lvert{x_{1}^{2}
       + x_{2}}\rvert}\right)} \right |^{0.5} + (x_{1} + x_{2})/100

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.199409030092` at
    :math:`x = [-9.94114736324, -9.99997128772]`

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
    Particle Swarm Methods: Evaluation on Some Benchmark Functions.
    Munich Personal RePEc Archive, 2006, 1005

    TODO Line 368
    TODO WARNING, minimum value is estimated from running many optimisations and
    choosing the best.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[-9.94114736324, -9.99997128772]]
        self.fglob = -0.199409030092

    def fun(self, x, *args):
        self.nfev += 1

        # Same structure as NewFunction01 with sin instead of cos.
        oscillatory = abs(sin(sqrt(abs(x[0] ** 2 + x[1])))) ** 0.5
        linear = 0.01 * (x[0] + x[1])
        return oscillatory + linear
#Newfunction 3 from Gavana is entered as Mishra05.
| 4,127
| 26.52
| 84
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_X.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, sum, sin, cos, pi, exp, arange, prod, sqrt
from .go_benchmark import Benchmark
class XinSheYang01(Benchmark):
    r"""
    Xin-She Yang 1 objective function.

    A multimodal minimization problem [1]_:

    .. math::

        f_{\text{XinSheYang01}}(x) = \sum_{i=1}^{n} \epsilon_i \lvert x_i
                                     \rvert^i

    The variable :math:`\epsilon_i, (i = 1, ..., n)` is a random variable
    uniformly distributed in :math:`[0, 1]`, redrawn each evaluation
    (so the function is noisy).

    Here, :math:`n` is the number of dimensions and
    :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.0, 5.0)] * self.N
        self.custom_bounds = ([-2, 2], [-2, 2])

        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # Component i is weighted by fresh uniform noise and raised to power i.
        exponents = arange(1.0, self.N + 1.0)
        noise = np.random.random(self.N)
        return sum(noise * (abs(x) ** exponents))
class XinSheYang02(Benchmark):
    r"""
    Xin-She Yang 2 objective function [1]_.

    .. math::

        f_{\text{XinSheYang02}}(x) = \frac{\sum_{i=1}^{n} \lvert{x_{i}}\rvert}
                                     {e^{\sum_{i=1}^{n} \sin(x_{i}^{2})}}

    Defined for :math:`x_i \in [-2\pi, 2\pi]`, :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for all :math:`i`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-2 * pi, 2 * pi)] * self.N

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # L1 norm damped by exp(sum of sin(x_i^2))
        numerator = sum(abs(x))
        return numerator * exp(-sum(sin(x ** 2.0)))
class XinSheYang03(Benchmark):
    r"""
    Xin-She Yang 3 objective function.

    This class defines the Xin-She Yang 3 [1]_ global optimization problem.
    This is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{XinSheYang03}}(x) = e^{-\sum_{i=1}^{n} (x_i/\beta)^{2m}}
                                     - 2e^{-\sum_{i=1}^{n} x_i^2}
                                     \prod_{i=1}^{n} \cos^2(x_i)

    Where, in this exercise, :math:`\beta = 15` and :math:`m = 5`
    (the implementation below uses these values).

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-20, 20]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1` for :math:`x_i = 0` for
    :math:`i = 1, ..., n`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = -1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        # beta controls the width of the broad exponential bump, m its flatness
        beta, m = 15.0, 5.0
        u = sum((x / beta) ** (2 * m))
        v = sum(x ** 2)
        w = prod(cos(x) ** 2)

        return exp(-u) - 2 * exp(-v) * w
class XinSheYang04(Benchmark):
    r"""
    Xin-She Yang 4 objective function [1]_.

    .. math::

        f_{\text{XinSheYang04}}(x) = \left[ \sum_{i=1}^{n} \sin^2(x_i)
                                     - e^{-\sum_{i=1}^{n} x_i^2} \right ]
                                     e^{-\sum_{i=1}^{n} \sin^2 \sqrt{ \lvert
                                     x_i \rvert }}

    Defined for :math:`x_i \in [-10, 10]`, :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1` at :math:`x_i = 0` for all :math:`i`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N

        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = -1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        sin_sq = sum(sin(x) ** 2)            # oscillatory part
        gauss = exp(-sum(x ** 2))            # Gaussian well at the origin
        damp = exp(-sum(sin(sqrt(abs(x))) ** 2))
        return (sin_sq - gauss) * damp
class Xor(Benchmark):
    r"""
    Xor objective function [1]_.

    Sum-of-squares training error of a tiny 2-2-1 sigmoid network on the
    four XOR patterns.  ``x[0:4]`` are the input-to-hidden weights,
    ``x[4:6]`` the hidden biases, ``x[6:8]`` the hidden-to-output weights
    and ``x[8]`` the output bias.

    .. math::

        f_{\text{Xor}}(x) = \left[ 1 + \exp \left( - \frac{x_7}{1 +
        \exp(-x_1 - x_2 - x_5)} - \frac{x_8}{1 + \exp(-x_3 - x_4 - x_6)}
        - x_9 \right ) \right ]^{-2} \\
        + \left [ 1 + \exp \left( -\frac{x_7}{1 + \exp(-x_5)}
        - \frac{x_8}{1 + \exp(-x_6)} - x_9 \right ) \right] ^{-2} \\
        + \left [1 - \left\{1 + \exp \left(-\frac{x_7}{1 + \exp(-x_1 - x_5)}
        - \frac{x_8}{1 + \exp(-x_3 - x_6)} - x_9 \right ) \right\}^{-1}
        \right ]^2 \\
        + \left [1 - \left\{1 + \exp \left(-\frac{x_7}{1 + \exp(-x_2 - x_5)}
        - \frac{x_8}{1 + \exp(-x_4 - x_6)} - x_9 \right ) \right\}^{-1}
        \right ]^2

    with :math:`x_i \in [-1, 1]` for :math:`i=1,...,9`.

    *Global optimum*: :math:`f(x) = 0.9597588` for
    :math:`x = [1, -1, 1, -1, -1, 1, 1, -1, 0.421134]`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=9):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-1.0, 1.0)] * self.N

        self.global_optimum = [[1.0, -1.0, 1.0,
                               -1.0, -1.0, 1.0, 1.0, -1.0, 0.421134]]
        self.fglob = 0.9597588

    def fun(self, x, *args):
        self.nfev += 1

        def out_denom(a, b):
            # 1 + exp(-output-neuron activation) for hidden pre-activations a, b
            h1 = x[6] / (1.0 + exp(-a))
            h2 = x[7] / (1.0 + exp(-b))
            return 1.0 + exp(-h1 - h2 - x[8])

        # patterns (1,1) and (0,0): target 0 -> penalize sigmoid output
        F1 = out_denom(x[0] + x[1] + x[4], x[2] + x[3] + x[5]) ** (-2)
        F2 = out_denom(x[4], x[5]) ** (-2)
        # patterns (1,0) and (0,1): target 1 -> penalize 1 - sigmoid output
        F3 = (1.0 - out_denom(x[0] + x[4], x[2] + x[5]) ** (-1)) ** 2
        F4 = (1.0 - out_denom(x[1] + x[4], x[3] + x[5]) ** (-1)) ** 2

        return F1 + F2 + F3 + F4
| 7,769
| 31.107438
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_E.py
|
# -*- coding: utf-8 -*-
from numpy import abs, asarray, cos, exp, arange, pi, sin, sqrt, sum
from .go_benchmark import Benchmark
class Easom(Benchmark):
    r"""
    Easom objective function.

    This class defines the Easom [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows
    (this is the formula the implementation evaluates; an earlier version of
    this docstring mistakenly listed Ackley's function here):

    .. math::

        f_{\text{Easom}}(x) = -\cos(x_1) \cos(x_2)
                              \exp\left[-(x_1 - \pi)^2 - (x_2 - \pi)^2\right]

    Here, :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1` for :math:`x = [\pi, \pi]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-100.0] * self.N,
                           [100.0] * self.N))

        self.global_optimum = [[pi for _ in range(self.N)]]
        self.fglob = -1.0

    def fun(self, x, *args):
        self.nfev += 1

        # squared distance from the optimum at (pi, pi)
        a = (x[0] - pi)**2 + (x[1] - pi)**2
        return -cos(x[0]) * cos(x[1]) * exp(-a)
class Eckerle4(Benchmark):
    r"""
    Eckerle4 objective function.

    Nonlinear least-squares fit of a Gaussian peak model

    .. math::

        y = \frac{x_1}{x_2} \exp\left[-\frac{(b - x_3)^2}{2 x_2^2}\right]

    to the NIST StRD ``Eckerle4`` data set (35 observations from a circular
    interference transmittance study); the objective is the sum of squared
    residuals.

    Eckerle, K., NIST (1979).
    Circular Interference Transmittance Study.

    ..[1] https://www.itl.nist.gov/div898/strd/nls/data/eckerle4.shtml

    #TODO, this is a NIST regression standard dataset, docstring needs
    improving
    """

    def __init__(self, dimensions=3):
        Benchmark.__init__(self, dimensions)
        # Per-parameter bounds: amplitude-like x1, width x2, peak location x3.
        self._bounds = list(zip([0., 1., 10.],
                                [20, 20., 600.]))
        # NIST certified parameter values and residual sum of squares.
        self.global_optimum = [[1.5543827178, 4.0888321754, 4.5154121844e2]]
        self.fglob = 1.4635887487E-03

        # Observed transmittance values (NIST 'y' column).
        self.a = asarray([1.5750000E-04, 1.6990000E-04, 2.3500000E-04,
                          3.1020000E-04, 4.9170000E-04, 8.7100000E-04,
                          1.7418000E-03, 4.6400000E-03, 6.5895000E-03,
                          9.7302000E-03, 1.4900200E-02, 2.3731000E-02,
                          4.0168300E-02, 7.1255900E-02, 1.2644580E-01,
                          2.0734130E-01, 2.9023660E-01, 3.4456230E-01,
                          3.6980490E-01, 3.6685340E-01, 3.1067270E-01,
                          2.0781540E-01, 1.1643540E-01, 6.1676400E-02,
                          3.3720000E-02, 1.9402300E-02, 1.1783100E-02,
                          7.4357000E-03, 2.2732000E-03, 8.8000000E-04,
                          4.5790000E-04, 2.3450000E-04, 1.5860000E-04,
                          1.1430000E-04, 7.1000000E-05])
        # Wavelength predictor values (NIST 'x' column).
        self.b = asarray([4.0000000E+02, 4.0500000E+02, 4.1000000E+02,
                          4.1500000E+02, 4.2000000E+02, 4.2500000E+02,
                          4.3000000E+02, 4.3500000E+02, 4.3650000E+02,
                          4.3800000E+02, 4.3950000E+02, 4.4100000E+02,
                          4.4250000E+02, 4.4400000E+02, 4.4550000E+02,
                          4.4700000E+02, 4.4850000E+02, 4.5000000E+02,
                          4.5150000E+02, 4.5300000E+02, 4.5450000E+02,
                          4.5600000E+02, 4.5750000E+02, 4.5900000E+02,
                          4.6050000E+02, 4.6200000E+02, 4.6350000E+02,
                          4.6500000E+02, 4.7000000E+02, 4.7500000E+02,
                          4.8000000E+02, 4.8500000E+02, 4.9000000E+02,
                          4.9500000E+02, 5.0000000E+02])

    def fun(self, x, *args):
        self.nfev += 1

        # Gaussian peak model evaluated at every predictor value.
        vec = x[0] / x[1] * exp(-(self.b - x[2]) ** 2 / (2 * x[1] ** 2))
        # Sum of squared residuals against the observed data.
        return sum((self.a - vec) ** 2)
class EggCrate(Benchmark):
    r"""
    Egg Crate objective function [1]_.

    .. math::

        f_{\text{EggCrate}}(x) = x_1^2 + x_2^2 + 25 \left[ \sin^2(x_1)
        + \sin^2(x_2) \right]

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-5.0, 5.0)] * self.N

        self.global_optimum = [[0.0, 0.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        quadratic = x[0] ** 2 + x[1] ** 2
        periodic = sin(x[0]) ** 2 + sin(x[1]) ** 2
        return quadratic + 25 * periodic
class EggHolder(Benchmark):
    r"""
    Egg Holder [1]_ objective function.

    .. math::

        f_{\text{EggHolder}}=\sum_{1}^{n - 1}\left[-\left(x_{i + 1}
        + 47 \right ) \sin\sqrt{\lvert x_{i+1} + x_i/2 + 47 \rvert}
        - x_i \sin\sqrt{\lvert x_i - (x_{i + 1} + 47)\rvert}\right ]

    with :math:`x_i \in [-512, 512]`, :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -959.640662711` at
    :math:`x = [512, 404.2319]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: Jamil is missing a minus sign on the fglob value
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # NOTE(review): lower bound is -512.1 (slightly asymmetric) in the
        # original implementation; kept as-is.
        self._bounds = list(zip([-512.1] * self.N,
                           [512.0] * self.N))

        self.global_optimum = [[512.0, 404.2319]]
        self.fglob = -959.640662711
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        lead, trail = x[:-1], x[1:]
        vec = (-(trail + 47) * sin(sqrt(abs(trail + lead / 2. + 47)))
               - lead * sin(sqrt(abs(lead - (trail + 47)))))
        return sum(vec)
class ElAttarVidyasagarDutta(Benchmark):
    r"""
    El-Attar-Vidyasagar-Dutta [1]_ objective function.

    .. math::

        f_{\text{ElAttarVidyasagarDutta}}(x) = (x_1^2 + x_2 - 10)^2
        + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2

    with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 1.712780354` at
    :math:`x = [3.40918683, -2.17143304]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-100.0, 100.0)] * self.N
        self.custom_bounds = [(-4, 4), (-4, 4)]

        self.global_optimum = [[3.40918683, -2.17143304]]
        self.fglob = 1.712780354

    def fun(self, x, *args):
        self.nfev += 1

        # three residuals of the defining system of equations
        r1 = x[0] ** 2 + x[1] - 10
        r2 = x[0] + x[1] ** 2 - 7
        r3 = x[0] ** 2 + x[1] ** 3 - 1
        return r1 ** 2 + r2 ** 2 + r3 ** 2
class Exp2(Benchmark):
    r"""
    Exp2 objective function.

    A least-squares style problem:

    .. math::

        f_{\text{Exp2}}(x) = \sum_{i=0}^9 \left ( e^{-ix_1/10} - 5e^{-ix_2/10}
        - e^{-i/10} + 5e^{-i} \right )^2

    with :math:`x_i \in [0, 20]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 10]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(0.0, 20.0)] * self.N
        self.custom_bounds = [(0, 2), (0, 20)]

        self.global_optimum = [[1.0, 10.]]
        self.fglob = 0.

    def fun(self, x, *args):
        self.nfev += 1

        i = arange(10.)
        # residual of the two-exponential model against its target curve
        residual = (exp(-i * x[0] / 10.) - 5 * exp(-i * x[1] / 10.)
                    - exp(-i / 10.) + 5 * exp(-i))
        return sum(residual ** 2)
class Exponential(Benchmark):
    r"""
    Exponential [1]_ objective function.

    .. math::

        f_{\text{Exponential}}(x) = -e^{-0.5 \sum_{i=1}^n x_i^2}

    with :math:`x_i \in [-1, 1]`, :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1` at :math:`x_i = 0` for all :math:`i`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Jamil are missing a minus sign on fglob
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-1.0, 1.0)] * self.N

        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = -1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        sq_norm = sum(x ** 2.0)
        return -exp(-0.5 * sq_norm)
| 9,797
| 31.12459
| 97
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_V.py
|
# -*- coding: utf-8 -*-
from numpy import sum, cos, sin, log
from .go_benchmark import Benchmark
class VenterSobiezcczanskiSobieski(Benchmark):
    r"""
    Venter Sobiezcczanski-Sobieski objective function [1]_.

    .. math::

        f(x) = x_1^2 - 100 \cos^2(x_1) - 100 \cos(x_1^2/30)
               + x_2^2 - 100 \cos^2(x_2) - 100 \cos(x_2^2/30)

    with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -400` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([-10, 10], [-10, 10])

        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = -400

    def fun(self, x, *args):
        self.nfev += 1

        # terms grouped exactly as in the defining equation
        t0 = x[0] ** 2.0 - 100.0 * cos(x[0]) ** 2.0
        t1 = -100.0 * cos(x[0] ** 2.0 / 30.0) + x[1] ** 2.0
        t2 = - 100.0 * cos(x[1]) ** 2.0 - 100.0 * cos(x[1] ** 2.0 / 30.0)
        return t0 + t1 + t2
class Vincent(Benchmark):
    r"""
    Vincent objective function.

    This class defines the Vincent [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Vincent}}(x) = - \sum_{i=1}^{n} \sin(10 \log(x_i))

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [0.25, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -n` for :math:`x_i = 7.70628098`
    for :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # Lower bound is strictly positive because of the log term.
        self._bounds = list(zip([0.25] * self.N, [10.0] * self.N))

        self.global_optimum = [[7.70628098 for _ in range(self.N)]]
        self.fglob = -float(self.N)
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        return -sum(sin(10.0 * log(x)))
| 2,709
| 30.511628
| 82
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/__init__.py
|
# -*- coding: utf-8 -*-
"""
==============================================================================
`go_benchmark_functions` -- Problems for testing global optimization routines
==============================================================================
This module provides a comprehensive set of problems for benchmarking global
optimization routines, such as scipy.optimize.basinhopping, or
scipy.optimize.differential_evolution. The purpose is to see whether a given
optimization routine can find the global minimum, and how many function
evaluations it requires to do so.
The range of problems is extensive, with a range of difficulty. The problems are
multivariate, with N=2 to N=17 provided.
References
----------
.. [1] Momin Jamil and Xin-She Yang, A literature survey of benchmark
functions for global optimization problems, Int. Journal of Mathematical
Modelling and Numerical Optimisation, Vol. 4, No. 2, pp. 150--194 (2013).
https://arxiv.org/abs/1308.4008v1
(and references contained within)
.. [2] http://infinity77.net/global_optimization/
.. [3] S. K. Mishra, Global Optimization By Differential Evolution and
Particle Swarm Methods: Evaluation On Some Benchmark Functions, Munich
Research Papers in Economics
.. [4] E. P. Adorio, U. P. Dilman, MVF - Multivariate Test Function Library
in C for Unconstrained Global Optimization Methods, [Available Online]:
https://www.geocities.ws/eadorio/mvf.pdf
.. [5] S. K. Mishra, Some New Test Functions For Global Optimization And
Performance of Repulsive Particle Swarm Method, [Available Online]:
https://mpra.ub.uni-muenchen.de/2718/
.. [6] NIST StRD Nonlinear Regression Problems, retrieved on 1 Oct, 2014
https://www.itl.nist.gov/div898/strd/nls/nls_main.shtml
"""
"""
Copyright 2013 Andrea Gavana
Author: <andrea.gavana@gmail.com>
Modifications 2014 Andrew Nelson
<andyfaff@gmail.com>
"""
from .go_funcs_A import *
from .go_funcs_B import *
from .go_funcs_C import *
from .go_funcs_D import *
from .go_funcs_E import *
from .go_funcs_F import *
from .go_funcs_G import *
from .go_funcs_H import *
from .go_funcs_I import *
from .go_funcs_J import *
from .go_funcs_K import *
from .go_funcs_L import *
from .go_funcs_M import *
from .go_funcs_N import *
from .go_funcs_O import *
from .go_funcs_P import *
from .go_funcs_Q import *
from .go_funcs_R import *
from .go_funcs_S import *
from .go_funcs_T import *
from .go_funcs_U import *
from .go_funcs_V import *
from .go_funcs_W import *
from .go_funcs_X import *
from .go_funcs_Y import *
from .go_funcs_Z import *
__all__ = [s for s in dir() if not s.startswith('_')]
| 2,646
| 35.260274
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_U.py
|
# -*- coding: utf-8 -*-
from numpy import abs, sin, cos, pi, sqrt
from .go_benchmark import Benchmark
class Ursem01(Benchmark):
    r"""
    Ursem 1 objective function [1]_.

    .. math::

        f_{\text{Ursem01}}(x) = - \sin(2x_1 - 0.5 \pi) - 3 \cos(x_2) - 0.5 x_1

    with :math:`x_1 \in [-2.5, 3]` and :math:`x_2 \in [-2, 2]`.

    *Global optimum*: :math:`f(x) = -4.81681406371` at
    :math:`x = [1.69714, 0.0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        # asymmetric box: x1 and x2 have different ranges
        self._bounds = [(-2.5, 3.0), (-2.0, 2.0)]

        self.global_optimum = [[1.69714, 0.0]]
        self.fglob = -4.81681406371

    def fun(self, x, *args):
        self.nfev += 1

        wave = sin(2 * x[0] - 0.5 * pi)
        return -wave - 3.0 * cos(x[1]) - 0.5 * x[0]
class Ursem03(Benchmark):
    r"""
    Ursem 3 objective function [1]_.

    .. math::

        f_{\text{Ursem03}}(x) = - \sin(2.2 \pi x_1 + 0.5 \pi)
        \frac{2 - \lvert x_1 \rvert}{2}
        \frac{3 - \lvert x_1 \rvert}{2}
        - \sin(2.2 \pi x_2 + 0.5 \pi)
        \frac{2 - \lvert x_2 \rvert}{2}
        \frac{3 - \lvert x_2 \rvert}{2}

    with :math:`x_1 \in [-2, 2]`, :math:`x_2 \in [-1.5, 1.5]`.

    *Global optimum*: :math:`f(x) = -3` at :math:`x = [0, 0]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO Gavana and Jamil #157 disagree on the formulae here. Jamil squares the
    x[1] term in the sine expression. Gavana doesn't. Go with Gavana here.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-2, 2), (-1.5, 1.5)]

        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = -3.0

    def fun(self, x, *args):
        self.nfev += 1

        def wavelet(t):
            # oscillation damped by two linear envelopes in |t|
            envelope = ((2.0 - abs(t)) / 2.0) * ((3.0 - abs(t)) / 2.0)
            return -sin(2.2 * pi * t + 0.5 * pi) * envelope

        return wavelet(x[0]) + wavelet(x[1])
class Ursem04(Benchmark):
    r"""
    Ursem 4 objective function [1]_.

    .. math::

        f_{\text{Ursem04}}(x) = -3 \sin(0.5 \pi x_1 + 0.5 \pi)
        \frac{2 - \sqrt{x_1^2 + x_2 ^ 2}}{4}

    with :math:`x_i \in [-2, 2]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1.5` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-2.0, 2.0)] * self.N

        self.global_optimum = [[0.0 for _ in range(self.N)]]
        self.fglob = -1.5

    def fun(self, x, *args):
        self.nfev += 1

        radius = sqrt(x[0] ** 2 + x[1] ** 2)
        return -3 * sin(0.5 * pi * x[0] + 0.5 * pi) * (2 - radius) / 4
class UrsemWaves(Benchmark):
    r"""
    Ursem Waves objective function.

    This class defines the Ursem Waves [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows
    (the cosine argument below uses :math:`3x_1`, matching the
    implementation; the survey's listing uses :math:`2x_1`):

    .. math::

        f_{\text{UrsemWaves}}(x) = -0.9x_1^2 + (x_2^2 - 4.5x_2^2)x_1x_2
        + 4.7 \cos \left[ 3x_1 - x_2^2(2 + x_1)
        \right ] \sin(2.5 \pi x_1)

    with :math:`x_1 \in [-0.9, 1.2]`, :math:`x_2 \in [-1.2, 1.2]`.

    *Global optimum*: :math:`f(x) = -8.5536` for :math:`x = [1.2, 1.2]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO Jamil #159, has an x_2^2 - 4.5 x_2^2 in the brackets. Why wasn't this
    rationalised to -5.5 x_2^2? This makes me wonder if the equation is listed
    correctly?
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-0.9, 1.2), (-1.2, 1.2)]

        self.global_optimum = [[1.2 for _ in range(self.N)]]
        self.fglob = -8.5536

    def fun(self, x, *args):
        self.nfev += 1

        u = -0.9 * x[0] ** 2
        # kept as written in the reference (not simplified to -3.5 x_2^2 x_1 x_2)
        v = (x[1] ** 2 - 4.5 * x[1] ** 2) * x[0] * x[1]
        w = 4.7 * cos(3 * x[0] - x[1] ** 2 * (2 + x[0])) * sin(2.5 * pi * x[0])
        return u + v + w
| 5,167
| 29.946108
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, cos, exp, arange, pi, sin, sqrt, sum, zeros, tanh
from numpy.testing import assert_almost_equal
from .go_benchmark import Benchmark
class Damavandi(Benchmark):
    r"""
    Damavandi objective function.

    This class defines the Damavandi [1]_ global optimization problem. This is
    a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Damavandi}}(x) = \left[ 1 - \lvert{\frac{
        \sin[\pi (x_1 - 2)]\sin[\pi (x_2 - 2)]}{\pi^2 (x_1 - 2)(x_2 - 2)}}
        \rvert^5 \right] \left[2 + (x_1 - 7)^2 + 2(x_2 - 7)^2 \right]

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [0, 14]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0.0` for :math:`x_i = 2` for
    :math:`i = 1, ..., n`.  The optimum sits on a 0/0 singularity of the
    formula, so the implementation evaluates to ``nan`` there and
    ``success`` treats ``nan`` as a hit.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        # The function is defined for two variables only; the requested
        # dimensionality is deliberately ignored.
        Benchmark.__init__(self, 2)

        self._bounds = list(zip([0.0] * self.N, [14.0] * self.N))

        self.global_optimum = [[2 for _ in range(self.N)]]
        # nan because the analytic optimum is a removable 0/0 singularity.
        self.fglob = np.nan

    def fun(self, x, *args):
        self.nfev += 1

        try:
            num = sin(pi * (x[0] - 2.0)) * sin(pi * (x[1] - 2.0))
            den = (pi ** 2) * (x[0] - 2.0) * (x[1] - 2.0)
            factor1 = 1.0 - (abs(num / den)) ** 5.0
            factor2 = 2 + (x[0] - 7.0) ** 2.0 + 2 * (x[1] - 7.0) ** 2.0
            return factor1 * factor2
        except ZeroDivisionError:
            # Only plain Python scalars raise here; NumPy floats yield nan
            # (with a warning) instead of raising.
            return np.nan

    def success(self, x):
        """Is a candidate solution at the global minimum"""
        val = self.fun(x)
        if np.isnan(val):
            return True
        try:
            assert_almost_equal(val, 0., 4)
            return True
        except AssertionError:
            return False
        # (an unreachable trailing `return False` was removed)
class Deb01(Benchmark):
    r"""
    Deb 1 objective function.

    This class defines the Deb 1 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Deb01}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6(5 \pi x_i)

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1.0` (each :math:`\sin^6` term attains
    1).  There are :math:`5^n` global minima, evenly spaced in the function
    landscape, where :math:`n` represents the dimension of the problem.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self.change_dimensionality = True

        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))

        # one representative of the 5^n equivalent minima
        self.global_optimum = [[0.3, -0.3]]
        self.fglob = -1.0

    def fun(self, x, *args):
        self.nfev += 1

        return -(1.0 / self.N) * sum(sin(5 * pi * x) ** 6.0)
class Deb03(Benchmark):
    r"""
    Deb 3 objective function.

    This class defines the Deb 3 [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Deb03}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6 \left[ 5 \pi
        \left ( x_i^{3/4} - 0.05 \right) \right ]

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [0, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1.0` (each :math:`\sin^6` term attains
    1).  There are :math:`5^n` global minima, evenly spaced in the function
    landscape, where :math:`n` represents the dimension of the problem.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self.change_dimensionality = True

        # lower limit changed to zero because of fractional power
        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))

        # one representative of the 5^n equivalent minima
        self.global_optimum = [[0.93388314, 0.68141781]]
        self.fglob = -1.0

    def fun(self, x, *args):
        self.nfev += 1

        return -(1.0 / self.N) * sum(sin(5 * pi * (x ** 0.75 - 0.05)) ** 6.0)
class Decanomial(Benchmark):
    r"""
    Decanomial objective function [1]_.

    Squared, scaled sum of the absolute values of a degree-10 polynomial in
    :math:`x_1` and a degree-4 polynomial in :math:`x_2`:

    .. math::

        f_{\text{Decanomial}}(x) = 0.001 \left(\lvert{x_{2}^{4} + 12 x_{2}^{3}
        + 54 x_{2}^{2} + 108 x_{2} + 81.0}\rvert + \lvert{x_{1}^{10}
        - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6}
        - 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2}
        - 5120 x_{1} + 2624.0}\rvert\right)^{2}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [2, -3]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = [(0, 2.5), (-2, -4)]

        self.global_optimum = [[2.0, -3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        # quartic in x2, vanishing at x2 = -3
        poly_y = x[1] ** 4 + 12 * x[1] ** 3 + 54 * x[1] ** 2 + 108 * x[1] + 81.0
        # degree-10 polynomial in x1, accumulated in the original term order
        poly_x = x[0] ** 10. - 20 * x[0] ** 9 + 180 * x[0] ** 8 - 960 * x[0] ** 7
        poly_x += 3360 * x[0] ** 6 - 8064 * x[0] ** 5 + 13340 * x[0] ** 4
        poly_x += - 15360 * x[0] ** 3 + 11520 * x[0] ** 2 - 5120 * x[0] + 2624

        return 0.001 * (abs(poly_y) + abs(poly_x)) ** 2.
class Deceptive(Benchmark):
    r"""
    Deceptive objective function.

    This class defines the Deceptive [1]_ global optimization problem. This is a
    multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Deceptive}}(x) = - \left [\frac{1}{n}
        \sum_{i=1}^{n} g_i(x_i) \right ]^{\beta}

    Where :math:`\beta` is a fixed non-linearity factor; in this exercise,
    :math:`\beta = 2`. The function :math:`g_i(x_i)` is given by:

    .. math::

        g_i(x_i) = \begin{cases}
        - \frac{x}{\alpha_i} + \frac{4}{5} &
        \textrm{if} \hspace{5pt} 0 \leq x_i \leq \frac{4}{5} \alpha_i \\
        \frac{5x}{\alpha_i} -4 &
        \textrm{if} \hspace{5pt} \frac{4}{5} \alpha_i \le x_i \leq \alpha_i \\
        \frac{5(x - \alpha_i)}{\alpha_i-1} &
        \textrm{if} \hspace{5pt} \alpha_i \le x_i \leq \frac{1 + 4\alpha_i}{5} \\
        \frac{x - 1}{1 - \alpha_i} &
        \textrm{if} \hspace{5pt} \frac{1 + 4\alpha_i}{5} \le x_i \leq 1
        \end{cases}

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [0, 1]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1` for :math:`x_i = \alpha_i` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: this function was taken from the Gavana website. The following code
    is based on his code. His code and the website don't match, the equations
    are wrong.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))

        # Per-coordinate optima alpha_i, evenly spaced in (0, 1).
        alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0)

        self.global_optimum = [alpha]
        self.fglob = -1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1

        alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0)
        beta = 2.0  # fixed non-linearity factor

        # Piecewise-linear deception profile per coordinate.
        # NOTE(review): the branch constants below follow the Gavana code,
        # which the TODO above says disagrees with the published equations;
        # keep the code as the ground truth.
        g = zeros((self.N, ))

        for i in range(self.N):
            if x[i] <= 0.0:
                # below the nominal [0, 1] domain: pass-through
                g[i] = x[i]
            elif x[i] < 0.8 * alpha[i]:
                # descending ramp away from 0.8 at the origin
                g[i] = -x[i] / alpha[i] + 0.8
            elif x[i] < alpha[i]:
                # steep rise towards the optimum at alpha_i
                g[i] = 5.0 * x[i] / alpha[i] - 4.0
            elif x[i] < (1.0 + 4 * alpha[i]) / 5.0:
                # steep descent just past the optimum
                g[i] = 5.0 * (x[i] - alpha[i]) / (alpha[i] - 1.0) + 1.0
            elif x[i] <= 1.0:
                # gentle rise back towards 0.8 at x_i = 1
                g[i] = (x[i] - 1.0) / (1.0 - alpha[i]) + 4.0 / 5.0
            else:
                # above the nominal domain: pass-through minus one
                g[i] = x[i] - 1.0

        return -((1.0 / self.N) * sum(g)) ** beta
class DeckkersAarts(Benchmark):
    r"""
    Deckkers-Aarts objective function [1]_.

    .. math::

        f_{\text{DeckkersAarts}}(x) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2
        + 10^{-5}(x_1^2 + x_2^2)^4

    with :math:`x_i \in [-20, 20]` for :math:`i = 1, 2`.

    *Global optimum*: at :math:`x = [0, \pm 14.9451209]`; this implementation
    records ``fglob = -24776.518342168``.
    NOTE(review): the survey lists -24776.518242168 (…242 vs …342 in the
    sixth decimal) — the source TODO already flags Jamil's values as
    slightly wrong; confirm before relying on the last digits.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: jamil solution and global minimum are slightly wrong.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = [(-20.0, 20.0)] * self.N
        self.custom_bounds = ([-1, 1], [14, 16])

        self.global_optimum = [[0.0, 14.9451209]]
        self.fglob = -24776.518342168

    def fun(self, x, *args):
        self.nfev += 1

        r2 = x[0] ** 2 + x[1] ** 2
        return 1.e5 * x[0] ** 2 + x[1] ** 2 - r2 ** 2 + 1.e-5 * r2 ** 4
class DeflectedCorrugatedSpring(Benchmark):
    r"""
    Deflected Corrugated Spring objective function.

    A multimodal problem [1]_ with :math:`K = 5`, :math:`\alpha = 5`:

    .. math::

        f(x) = 0.1 \sum_{i=1}^n (x_i - \alpha)^2
               - \cos\left(K \sqrt{\sum_{i=1}^n (x_i - \alpha)^2}\right)

    with :math:`x_i \in [0, 2\alpha]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -1` at :math:`x_i = \alpha`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: the Gavana website and codebase disagree; the implementation
    below follows the code, and the stated global minimum is wrong there.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        alpha = 5.0
        self._bounds = [(0, 2 * alpha)] * self.N
        self.global_optimum = [[alpha for _ in range(self.N)]]
        self.fglob = -1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        K, alpha = 5.0, 5.0
        # Squared distance from the deflection point alpha.
        d2 = sum((x - alpha) ** 2)
        return -cos(K * sqrt(d2)) + 0.1 * d2
class DeVilliersGlasser01(Benchmark):
    r"""
    DeVilliers-Glasser 1 objective function.

    A four-dimensional least-squares fitting problem [1]_:

    .. math::

        f(x) = \sum_{i=1}^{24} \left[x_1 x_2^{t_i}
               \sin(x_3 t_i + x_4) - y_i\right]^2

    with :math:`t_i = 0.1(i - 1)`,
    :math:`y_i = 60.137 (1.371^{t_i}) \sin(3.112 t_i + 1.761)` and
    :math:`x_i \in [1, 100]`.

    *Global optimum*: :math:`f(x) = 0` at
    :math:`x = [60.137, 1.371, 3.112, 1.761]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(1.0, 100.0)] * self.N
        self.global_optimum = [[60.137, 1.371, 3.112, 1.761]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        t = 0.1 * arange(24)
        # Target data generated from the known optimal parameters.
        target = 60.137 * (1.371 ** t) * sin(3.112 * t + 1.761)
        model = x[0] * (x[1] ** t) * sin(x[2] * t + x[3])
        return sum((model - target) ** 2.0)
class DeVilliersGlasser02(Benchmark):
    r"""
    DeVilliers-Glasser 2 objective function.

    A five-dimensional least-squares fitting problem [1]_:

    .. math::

        f(x) = \sum_{i} \left[x_1 x_2^{t_i}
               \tanh(x_3 t_i + \sin(x_4 t_i)) \cos(t_i e^{x_5})
               - y_i\right]^2

    with :math:`t_i = 0.1(i - 1)`,
    :math:`y_i = 53.81 (1.27^{t_i}) \tanh(3.012 t_i + \sin(2.13 t_i))
    \cos(e^{0.507} t_i)` and :math:`x_i \in [1, 60]`.

    *Global optimum*: :math:`f(x) = 0` at
    :math:`x = [53.81, 1.27, 3.012, 2.13, 0.507]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=5):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(1.0, 60.0)] * self.N
        self.global_optimum = [[53.81, 1.27, 3.012, 2.13, 0.507]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # NOTE: 16 sample points are used here, although the reference
        # formula quotes 24 — kept as-is to preserve benchmark values.
        t = 0.1 * arange(16)
        target = (53.81 * 1.27 ** t * tanh(3.012 * t + sin(2.13 * t))
                  * cos(exp(0.507) * t))
        model = (x[0] * (x[1] ** t) * tanh(x[2] * t + sin(x[3] * t))
                 * cos(t * exp(x[4])))
        return sum((model - target) ** 2.0)
class DixonPrice(Benchmark):
    r"""
    Dixon and Price objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = (x_1 - 1)^2 + \sum_{i=2}^n i (2 x_i^2 - x_{i-1})^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at
    :math:`x_i = 2^{-(2^i - 2) / 2^i}`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

    TODO: Gavana code not correct; the index array should start from 2.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = [(-2, 3), (-2, 3)]
        self.global_optimum = [[2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i)
                                for i in range(1, self.N + 1)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # Weights i = 2..n for the coupled quadratic terms.
        idx = arange(2, self.N + 1)
        terms = idx * (2.0 * x[1:] ** 2.0 - x[:-1]) ** 2.0
        return sum(terms) + (x[0] - 1.0) ** 2.0
class Dolan(Benchmark):
    r"""
    Dolan objective function.

    A five-dimensional multimodal problem [1]_:

    .. math::

        f(x) = \lvert (x_1 + 1.7 x_2)\sin(x_1) - 1.5 x_3
               - 0.1 x_4 \cos(x_4 + x_5 - x_1) + 0.2 x_5^2
               - x_2 - 1 \rvert

    with :math:`x_i \in [-100, 100]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: the Jamil equation is missing the absolute brackets around the
    entire expression.
    """

    def __init__(self, dimensions=5):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-100.0, 100.0)] * self.N
        self.global_optimum = [[-74.10522498, 44.33511286, 6.21069214,
                                18.42772233, -16.5839403]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        inner = ((x[0] + 1.7 * x[1]) * sin(x[0]) - 1.5 * x[2]
                 - 0.1 * x[3] * cos(x[3] + x[4] - x[0]) + 0.2 * x[4] ** 2
                 - x[1] - 1)
        return abs(inner)
class DropWave(Benchmark):
    r"""
    DropWave objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = - \frac{1 + \cos\left(12 \sqrt{\sum_{i=1}^{n} x_i^2}\right)}
                      {2 + 0.5 \sum_{i=1}^{n} x_i^2}

    with :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -1` at :math:`x = [0, 0]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.12, 5.12)] * self.N
        self.global_optimum = [[0] * self.N]
        self.fglob = -1.0

    def fun(self, x, *args):
        self.nfev += 1
        # Squared norm of x appears in numerator and denominator.
        r2 = sum(x ** 2)
        return -(1 + cos(12 * sqrt(r2))) / (0.5 * r2 + 2)
| 17,905
| 30.414035
| 95
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py
|
# -*- coding: utf-8 -*-
from numpy import abs, sum, sign, arange
from .go_benchmark import Benchmark
class Zacharov(Benchmark):
    r"""
    Zacharov objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \sum_{i=1}^{n} x_i^2
               + \left(\frac{1}{2} \sum_{i=1}^{n} i x_i\right)^2
               + \left(\frac{1}{2} \sum_{i=1}^{n} i x_i\right)^4

    with :math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 10.0)] * self.N
        self.custom_bounds = ([-1, 1], [-1, 1])
        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        sum_sq = sum(x ** 2)
        # Weighted sum i * x_i shared by the quadratic and quartic terms.
        weighted = sum(arange(1, self.N + 1) * x)
        return sum_sq + (0.5 * weighted) ** 2 + (0.5 * weighted) ** 4
class ZeroSum(Benchmark):
    r"""
    ZeroSum objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \begin{cases}
               0 & \textrm{if } \sum_{i=1}^n x_i = 0 \\
               1 + \left(10000 \left|\sum_{i=1}^n x_i\right|\right)^{0.5}
                 & \textrm{otherwise}
               \end{cases}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` wherever :math:`\sum_{i=1}^n x_i = 0`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        total = abs(sum(x))
        # Treat sums below machine-epsilon scale as exactly zero.
        if total < 3e-16:
            return 0.0
        return 1.0 + (10000.0 * total) ** 0.5
class Zettl(Benchmark):
    r"""
    Zettl objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \frac{1}{4} x_1 + \left(x_1^2 - 2 x_1 + x_2^2\right)^2

    *Global optimum*: :math:`f(x) = -0.0037912` at
    :math:`x = [-0.029896, 0.0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # NOTE(review): bounds [-5, 10] differ from the literature's
        # [-1, 5]; kept to preserve the benchmark as shipped.
        self._bounds = [(-5.0, 10.0)] * self.N
        self.global_optimum = [[-0.02989597760285287, 0.0]]
        self.fglob = -0.003791237220468656

    def fun(self, x, *args):
        self.nfev += 1
        quad = x[0] ** 2 + x[1] ** 2 - 2 * x[0]
        return quad ** 2 + 0.25 * x[0]
class Zimmerman(Benchmark):
    r"""
    Zimmerman objective function.

    A constrained-style multimodal problem [1]_ expressed as a max of
    penalty terms:

    .. math::

        f(x) = \max\left[Zh1(x),\; Zp(Zh2(x))\,\textrm{sgn}(Zh2(x)),\;
               Zp(Zh3(x))\,\textrm{sgn}(Zh3(x)),\;
               Zp(-x_1)\,\textrm{sgn}(x_1),\;
               Zp(-x_2)\,\textrm{sgn}(x_2)\right]

    with :math:`x_i \in [0, 100]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [7, 2]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: implementation from Gavana.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 100.0)] * self.N
        self.custom_bounds = ([0.0, 8.0], [0.0, 8.0])
        self.global_optimum = [[7.0, 2.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1

        def zh1(v):
            return 9.0 - v[0] - v[1]

        def zh2(v):
            # NOTE: the "- 16.0" offset matches Gavana's code, not the
            # docstring formula.
            return (v[0] - 3.0) ** 2.0 + (v[1] - 2.0) ** 2.0 - 16.0

        def zh3(v):
            return v[0] * v[1] - 14.0

        def zp(t):
            return 100.0 * (1.0 + t)

        return max(zh1(x),
                   zp(zh2(x)) * sign(zh2(x)),
                   zp(zh3(x)) * sign(zh3(x)),
                   zp(-x[0]) * sign(x[0]),
                   zp(-x[1]) * sign(x[1]))
class Zirilli(Benchmark):
    r"""
    Zirilli objective function.

    A unimodal problem [1]_:

    .. math::

        f(x) = 0.25 x_1^4 - 0.5 x_1^2 + 0.1 x_1 + 0.5 x_2^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.3523` at :math:`x = [-1.0465, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
        self.global_optimum = [[-1.0465, 0.0]]
        self.fglob = -0.35238603

    def fun(self, x, *args):
        self.nfev += 1
        u, v = x[0], x[1]
        return 0.25 * u ** 4 - 0.5 * u ** 2 + 0.1 * u + 0.5 * v ** 2
| 6,737
| 28.682819
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py
|
# -*- coding: utf-8 -*-
from numpy import (abs, sum, sin, cos, sqrt, log, prod, where, pi, exp, arange,
floor, log10, atleast_2d, zeros)
from .go_benchmark import Benchmark
class Parsopoulos(Benchmark):
    r"""
    Parsopoulos objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \cos(x_1)^2 + \sin(x_2)^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: infinitely many minima in :math:`R^2` at
    :math:`(k \pi / 2, \lambda \pi)` with :math:`k = \pm 1, \pm 3, ...`
    and :math:`\lambda = 0, \pm 1, \pm 2, ...`; within the bounds the
    function has 12 global minima, all equal to zero.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 5.0)] * self.N
        self.global_optimum = [[pi / 2.0, pi]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        return cos(x[0]) ** 2.0 + sin(x[1]) ** 2.0
class Pathological(Benchmark):
    r"""
    Pathological objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \sum_{i=1}^{n-1} \left[0.5 +
               \frac{\sin^2\left(\sqrt{100 x_i^2 + x_{i+1}^2}\right) - 0.5}
                    {1 + 0.001 (x_i^2 - 2 x_i x_{i+1} + x_{i+1}^2)^2}\right]

    with :math:`x_i \in [-100, 100]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-100.0, 100.0)] * self.N
        self.global_optimum = [[0] * self.N]
        self.fglob = 0.

    def fun(self, x, *args):
        self.nfev += 1
        head, tail = x[:-1], x[1:]
        numer = sin(sqrt(100 * head ** 2 + tail ** 2)) ** 2 - 0.5
        denom = 1. + 0.001 * (head ** 2 - 2 * head * tail + tail ** 2) ** 2
        return sum(0.5 + numer / denom)
class Paviani(Benchmark):
    r"""
    Paviani objective function.

    A ten-dimensional multimodal problem [1]_:

    .. math::

        f(x) = \sum_{i=1}^{10} \left[\log^2(10 - x_i)
               + \log^2(x_i - 2)\right]
               - \left(\prod_{i=1}^{10} x_i\right)^{0.2}

    with :math:`x_i \in [2.001, 9.999]`.

    *Global optimum*: :math:`f(x) = -45.7784684040686` at
    :math:`x_i = 9.350266`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.

    TODO: Gavana's web/code definition appears wrong - the final product
    term should not raise x to the power 10.
    """

    def __init__(self, dimensions=10):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(2.001, 9.999)] * self.N
        self.global_optimum = [[9.350266 for _ in range(self.N)]]
        self.fglob = -45.7784684040686

    def fun(self, x, *args):
        self.nfev += 1
        log_terms = sum(log(x - 2) ** 2.0 + log(10.0 - x) ** 2.0)
        return log_terms - prod(x) ** 0.2
class Penalty01(Benchmark):
    r"""
    Penalty 1 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \frac{\pi}{30} \left\{10 \sin^2(\pi y_1)
               + \sum_{i=1}^{n-1} (y_i - 1)^2
               \left[1 + 10 \sin^2(\pi y_{i+1})\right]
               + (y_n - 1)^2\right\} + \sum_{i=1}^n u(x_i, 10, 100, 4)

    where :math:`y_i = 1 + (x_i + 1)/4` and :math:`u` is the standard
    boundary penalty: :math:`u(t, a, k, m) = k(|t| - a)^m` for
    :math:`|t| > a` and zero otherwise.

    Here :math:`x_i \in [-50, 50]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = -1`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([-5.0, 5.0], [-5.0, 5.0])
        self.global_optimum = [[-1.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        limit, scale, power = 10.0, 100.0, 4.0
        ax = abs(x)
        # Boundary penalty u(x_i, 10, 100, 4): zero inside [-10, 10].
        penalty = where(ax > limit, scale * (ax - limit) ** power, 0.0)
        y = 1.0 + (x + 1.0) / 4.0
        core = (10.0 * sin(pi * y[0]) ** 2.0
                + sum((y[:-1] - 1.0) ** 2.0
                      * (1.0 + 10.0 * sin(pi * y[1:]) ** 2.0))
                + (y[-1] - 1) ** 2.0)
        return sum(penalty) + (pi / 30.0) * core
class Penalty02(Benchmark):
    r"""
    Penalty 2 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = 0.1 \left\{\sin^2(3\pi x_1)
               + \sum_{i=1}^{n-1} (x_i - 1)^2
                 \left[1 + \sin^2(3\pi x_{i+1})\right]
               + (x_n - 1)^2 \left[1 + \sin^2(2\pi x_n)\right]\right\}
               + \sum_{i=1}^n u(x_i, 5, 100, 4)

    where :math:`u` is the standard boundary penalty:
    :math:`u(t, a, k, m) = k(|t| - a)^m` for :math:`|t| > a`, else zero.

    Here :math:`x_i \in [-50, 50]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 1`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([-4.0, 4.0], [-4.0, 4.0])
        self.global_optimum = [[1.0 for _ in range(self.N)]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        limit, scale, power = 5.0, 100.0, 4.0
        ax = abs(x)
        # Boundary penalty u(x_i, 5, 100, 4): zero inside [-5, 5].
        penalty = where(ax > limit, scale * (ax - limit) ** power, 0.0)
        # NOTE: the leading factor 10 on the first sine term matches the
        # shipped code, not the docstring formula.
        core = (10 * sin(3.0 * pi * x[0]) ** 2.0
                + sum((x[:-1] - 1.0) ** 2.0
                      * (1.0 + sin(3 * pi * x[1:]) ** 2.0))
                + (x[-1] - 1) ** 2.0 * (1 + sin(2 * pi * x[-1]) ** 2.0))
        return sum(penalty) + 0.1 * core
class PenHolder(Benchmark):
    r"""
    PenHolder objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = -e^{-\left|\cos(x_1)\cos(x_2)
               e^{\left|1 - \sqrt{x_1^2 + x_2^2}/\pi\right|}\right|^{-1}}

    with :math:`x_i \in [-11, 11]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = -0.9635348327265058` at
    :math:`x_i = \pm 9.646167671043401`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-11.0, 11.0)] * self.N
        self.global_optimum = [[-9.646167708023526, 9.646167671043401]]
        self.fglob = -0.9635348327265058

    def fun(self, x, *args):
        self.nfev += 1
        radial = abs(1. - (sqrt(x[0] ** 2 + x[1] ** 2) / pi))
        inner = cos(x[0]) * cos(x[1]) * exp(radial)
        return -exp(-abs(inner) ** -1)
class PermFunction01(Benchmark):
    r"""
    Perm Function 1 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \sum_{k=1}^n \left\{\sum_{j=1}^n (j^k + \beta)
               \left[(x_j / j)^k - 1\right]\right\}^2

    with :math:`\beta = 0.5` and :math:`x_i \in [-n, n + 1]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = i`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
       Particle Swarm Methods: Evaluation on Some Benchmark Functions.
       Munich Personal RePEc Archive, 2006, 1005

    TODO: line 560
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-self.N, self.N + 1)] * self.N
        self.global_optimum = [list(range(1, self.N + 1))]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        beta = 0.5
        # Column vector of k (outer sum) against row vector of j (inner
        # sum); broadcasting builds the full N x N term matrix at once.
        kcol = atleast_2d(arange(self.N) + 1).T
        jrow = atleast_2d(arange(self.N) + 1)
        terms = (jrow ** kcol + beta) * ((x / jrow) ** kcol - 1)
        return sum(sum(terms, axis=1) ** 2)
class PermFunction02(Benchmark):
    r"""
    Perm Function 2 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \sum_{k=1}^n \left\{\sum_{j=1}^n (j + \beta)
               \left[x_j^k - (1/j)^k\right]\right\}^2

    with :math:`\beta = 10` and :math:`x_i \in [-n, n + 1]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 1/i`.

    .. [1] Mishra, S. Global Optimization by Differential Evolution and
       Particle Swarm Methods: Evaluation on Some Benchmark Functions.
       Munich Personal RePEc Archive, 2006, 1005

    TODO: line 582
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-self.N, self.N + 1)] * self.N
        self.custom_bounds = ([0, 1.5], [0, 1.0])
        self.global_optimum = [1. / arange(1, self.N + 1)]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        beta = 10
        # Broadcast k (column) against j (row) to form all N x N terms.
        kcol = atleast_2d(arange(self.N) + 1).T
        jrow = atleast_2d(arange(self.N) + 1)
        terms = (jrow + beta) * (x ** kcol - (1. / jrow) ** kcol)
        return sum(sum(terms, axis=1) ** 2)
class Pinter(Benchmark):
    r"""
    Pinter objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = \sum_{i=1}^n i x_i^2 + \sum_{i=1}^n 20 i \sin^2 A
               + \sum_{i=1}^n i \log_{10}(1 + i B^2)

    with :math:`A = x_{i-1} \sin x_i + \sin x_{i+1}`,
    :math:`B = x_{i-1}^2 - 2 x_i + 3 x_{i+1} - \cos x_i + 1`, using the
    wrap-around convention :math:`x_0 = x_n`, :math:`x_{n+1} = x_1`,
    and :math:`x_i \in [-10, 10]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark
       Functions For Global Optimization Problems. Int. Journal of
       Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        idx = arange(self.N) + 1
        # Pad x with wrap-around neighbours: xw = [x_n, x_1..x_n, x_1].
        xw = zeros(self.N + 2)
        xw[1:-1] = x
        xw[0] = x[-1]
        xw[-1] = x[0]
        a = xw[0:-2] * sin(xw[1:-1]) + sin(xw[2:])
        b = xw[0:-2] ** 2 - 2 * xw[1:-1] + 3 * xw[2:] - cos(xw[1:-1]) + 1
        return (sum(idx * x ** 2)
                + sum(20 * idx * sin(a) ** 2)
                + sum(idx * log10(1 + idx * b ** 2)))
class Plateau(Benchmark):
    r"""
    Plateau objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = 30 + \sum_{i=1}^n \lfloor |x_i| \rfloor

    with :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 30` at :math:`x_i = 0`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.12, 5.12)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 30.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        return 30.0 + sum(floor(abs(x)))
class Powell(Benchmark):
    r"""
    Powell objective function.

    A four-dimensional problem [1]_:

    .. math::

        f(x) = (x_1 + 10 x_2)^2 + 5 (x_3 - x_4)^2
               + (x_2 - 2 x_3)^4 + 10 (x_1 - x_4)^4

    with :math:`x_i \in [-4, 5]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0`.

    .. [1] Powell, M. An iterative method for finding stationary values
       of a function of several variables. Computer Journal, 1962, 5,
       147-151
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-4.0, 5.0)] * self.N
        self.global_optimum = [[0, 0, 0, 0]]
        self.fglob = 0

    def fun(self, x, *args):
        self.nfev += 1
        t1 = (x[0] + 10 * x[1]) ** 2
        t2 = 5 * (x[2] - x[3]) ** 2
        t3 = (x[1] - 2 * x[2]) ** 4
        t4 = 10 * (x[0] - x[3]) ** 4
        return t1 + t2 + t3 + t4
class PowerSum(Benchmark):
    r"""
    Power Sum objective function.

    A four-dimensional multimodal problem:

    .. math::

        f(x) = \sum_{k=1}^n \left[\left(\sum_{i=1}^n x_i^k\right)
               - b_k\right]^2

    with :math:`b = [8, 18, 44, 114]` and :math:`x_i \in [0, 4]`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 2, 2, 3]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 4.0)] * self.N
        self.global_optimum = [[1.0, 2.0, 2.0, 3.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        targets = [8.0, 18.0, 44.0, 114.0]
        # Column of exponents k = 1..N; broadcasting gives all x_i^k.
        kcol = atleast_2d(arange(self.N) + 1).T
        return sum((sum(x ** kcol, axis=1) - targets) ** 2)
class Price01(Benchmark):
    r"""
    Price 1 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = (|x_1| - 5)^2 + (|x_2| - 5)^2

    with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at each of the four points
    :math:`x = [\pm 5, \pm 5]`.

    .. [1] Price, W. A controlled random search procedure for global
       optimisation. Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-500.0, 500.0)] * self.N
        self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0])
        self.global_optimum = [[5.0, 5.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        return (abs(x[0]) - 5.0) ** 2.0 + (abs(x[1]) - 5.0) ** 2.0
class Price02(Benchmark):
    r"""
    Price 2 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = 1 + \sin^2(x_1) + \sin^2(x_2) - 0.1 e^{-x_1^2 - x_2^2}

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.9` at :math:`x = [0, 0]`.

    .. [1] Price, W. A controlled random search procedure for global
       optimisation. Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.0, 0.0]]
        self.fglob = 0.9

    def fun(self, x, *args):
        self.nfev += 1
        return 1.0 + sum(sin(x) ** 2) - 0.1 * exp(-x[0] ** 2.0 - x[1] ** 2.0)
class Price03(Benchmark):
    r"""
    Price 3 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = 100 (x_2 - x_1^2)^2
               + \left[6.4 (x_2 - 0.5)^2 - x_1 - 0.6\right]^2

    with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [1, 1]`.

    .. [1] Price, W. A controlled random search procedure for global
       optimisation. Computer Journal, 1977, 20, 367-370

    TODO: Jamil #96 has an erroneous factor of 6 in front of the square
    brackets.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0, 5.0)] * self.N
        self.custom_bounds = ([0, 2], [0, 2])
        self.global_optimum = [[1.0, 1.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        rosen_like = 100 * (x[1] - x[0] ** 2) ** 2
        bracket = 6.4 * (x[1] - 0.5) ** 2 - x[0] - 0.6
        return rosen_like + bracket ** 2
class Price04(Benchmark):
    r"""
    Price 4 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = (2 x_1^3 x_2 - x_2^3)^2 + (6 x_1 - x_2^2 + x_2)^2

    with :math:`x_i \in [-50, 50]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [0, 0]`,
    :math:`x = [2, 4]` and :math:`x = [1.464, -2.506]`.

    .. [1] Price, W. A controlled random search procedure for global
       optimisation. Computer Journal, 1977, 20, 367-370
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-50.0, 50.0)] * self.N
        self.custom_bounds = ([0, 2], [0, 2])
        self.global_optimum = [[2.0, 4.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        cubic = 2.0 * x[1] * x[0] ** 3.0 - x[1] ** 3.0
        linear = 6.0 * x[0] - x[1] ** 2.0 + x[1]
        return cubic ** 2.0 + linear ** 2.0
| 20,990
| 27.873453
| 84
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_A.py
|
# -*- coding: utf-8 -*-
from numpy import abs, cos, exp, pi, prod, sin, sqrt, sum
from .go_benchmark import Benchmark
class Ackley01(Benchmark):
    r"""
    Ackley01 objective function.

    A multimodal problem [1]_:

    .. math::

        f(x) = -20 e^{-0.2 \sqrt{\frac{1}{n} \sum_{i=1}^n x_i^2}}
               - e^{\frac{1}{n} \sum_{i=1}^n \cos(2 \pi x_i)} + 20 + e

    with :math:`x_i \in [-35, 35]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0`.

    .. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
       Unconstrained Global Optimization", 2005

    TODO: the -0.2 factor in the exponent of the first term is given as
    -0.02 in Jamil et al.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-35.0, 35.0)] * self.N
        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        sum_sq = sum(x ** 2)
        sum_cos = sum(cos(2 * pi * x))
        return (-20. * exp(-0.2 * sqrt(sum_sq / self.N))
                - exp(sum_cos / self.N) + 20. + exp(1.))
class Ackley02(Benchmark):
    r"""
    Ackley02 objective function.

    The Ackley02 [1]_ global optimization problem is a multimodal minimization
    problem defined as follows:

    .. math::

        f_{\text{Ackley02}}(x) = -200 e^{-0.02 \sqrt{x_1^2 + x_2^2}}

    with :math:`x_i \in [-32, 32]` for :math:`i=1, 2`.

    *Global optimum*: :math:`f(x) = -200` for :math:`x = [0, 0]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-32.0] * self.N, [32.0] * self.N))
        self.global_optimum = [[0 for _ in range(self.N)]]
        self.fglob = -200.

    def fun(self, x, *args):
        self.nfev += 1
        # Decaying exponential of the distance from the origin.
        return -200 * exp(-0.02 * sqrt(x[0] ** 2 + x[1] ** 2))
class Ackley03(Benchmark):
    r"""
    Ackley03 [1]_ objective function.

    A two-dimensional multimodal minimization problem:

    .. math::

        f_{\text{Ackley03}}(x) = -200 e^{-0.02 \sqrt{x_1^2 + x_2^2}} +
        5e^{\cos(3x_1) + \sin(3x_2)}

    with :math:`x_i \in [-32, 32]` for :math:`i=1, 2`.

    *Global optimum*: :math:`f(x) = -195.62902825923879` at
    :math:`x = [-0.68255758, -0.36070859]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: I think the minus sign is missing in front of the first term in eqn3
    in [1]_. This changes the global minimum
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-32.0, 32.0)] * self.N
        self.global_optimum = [[-0.68255758, -0.36070859]]
        self.fglob = -195.62902825923879

    def fun(self, x, *args):
        self.nfev += 1
        radial = -200 * exp(-0.02 * sqrt(x[0] ** 2 + x[1] ** 2))
        oscillatory = 5 * exp(cos(3 * x[0]) + sin(3 * x[1]))
        return radial + oscillatory
class Adjiman(Benchmark):
    r"""
    Adjiman objective function.

    The Adjiman [1]_ global optimization problem is a multimodal minimization
    problem:

    .. math::

        f_{\text{Adjiman}}(x) = \cos(x_1)\sin(x_2) - \frac{x_1}{(x_2^2 + 1)}

    with :math:`x_1 \in [-1, 2]` and :math:`x_2 \in [-1, 1]`.

    *Global optimum*: :math:`f(x) = -2.02181` at :math:`x = [2.0, 0.10578]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Asymmetric domain: x1 and x2 have different bounds.
        self._bounds = ([-1.0, 2.0], [-1.0, 1.0])
        self.global_optimum = [[2.0, 0.10578]]
        self.fglob = -2.02180678

    def fun(self, x, *args):
        self.nfev += 1
        u, w = x[0], x[1]
        return cos(u) * sin(w) - u / (w ** 2 + 1)
class Alpine01(Benchmark):
    r"""
    Alpine01 objective function.

    The Alpine01 [1]_ global optimization problem is a multimodal minimization
    problem:

    .. math::

        f_{\text{Alpine01}}(x) = \sum_{i=1}^{n} \lvert {x_i \sin \left( x_i
        \right) + 0.1 x_i} \rvert

    Here, :math:`n` is the number of dimensions and :math:`x_i \in [-10, 10]`
    for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x_i = 0` for all :math:`i`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0] * self.N]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        contributions = x * sin(x) + 0.1 * x
        return sum(abs(contributions))
class Alpine02(Benchmark):
    r"""
    Alpine02 objective function.

    The Alpine02 [1]_ global optimization problem is a multimodal minimization
    problem defined as follows:

    .. math::

        f_{\text{Alpine02}}(x) = \prod_{i=1}^{n} \sqrt{x_i} \sin(x_i)

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in [0,
    10]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = -6.1295` for :math:`x =
    [7.91705268, 4.81584232]` for :math:`i = 1, 2`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: eqn 7 in [1]_ has the wrong global minimum value.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))
        self.global_optimum = [[7.91705268, 4.81584232]]
        self.fglob = -6.12950
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        return prod(sqrt(x) * sin(x))
class AMGM(Benchmark):
    r"""
    AMGM objective function.

    The AMGM (Arithmetic Mean - Geometric Mean Equality) global optimization
    problem is a multimodal minimization problem:

    .. math::

        f_{\text{AMGM}}(x) = \left ( \frac{1}{n} \sum_{i=1}^{n} x_i -
        \sqrt[n]{ \prod_{i=1}^{n} x_i} \right )^2

    Here, :math:`n` is the number of dimensions and :math:`x_i \in [0, 10]`
    for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` whenever :math:`x_1 = x_2 = ... = x_n`
    (the two means coincide exactly for equal entries).

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO, retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 10.0)] * self.N
        self.global_optimum = [[1, 1]]
        self.fglob = 0.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        arith = sum(x)
        geom = prod(x)
        arith = arith / self.N
        geom = geom ** (1.0 / self.N)
        # Squared gap between the arithmetic and geometric means.
        return (arith - geom) ** 2
| 7,993
| 27.55
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_J.py
|
# -*- coding: utf-8 -*-
from numpy import sum, asarray, arange, exp
from .go_benchmark import Benchmark
class JennrichSampson(Benchmark):
    r"""
    Jennrich-Sampson objective function.

    The Jennrich-Sampson [1]_ global optimization problem is a multimodal
    minimization problem:

    .. math::

        f_{\text{JennrichSampson}}(x) = \sum_{i=1}^{10} \left [2 + 2i
        - (e^{ix_1} + e^{ix_2}) \right ]^2

    with :math:`x_i \in [-1, 1]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 124.3621824` at
    :math:`x = [0.257825, 0.257825]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-1.0, 1.0)] * self.N
        self.global_optimum = [[0.257825, 0.257825]]
        self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
        self.fglob = 124.3621824

    def fun(self, x, *args):
        self.nfev += 1
        idx = arange(1, 11)
        residuals = 2 + 2 * idx - (exp(idx * x[0]) + exp(idx * x[1]))
        return sum(residuals ** 2)
class Judge(Benchmark):
    r"""
    Judge objective function.

    The Judge [1]_ global optimization problem is a multimodal least-squares
    minimization problem:

    .. math::

        f_{\text{Judge}}(x) = \sum_{i=1}^{20}
        \left [ \left (x_1 + A_i x_2 + B x_2^2 \right ) - C_i \right ]^2

    where :math:`A`, :math:`B` and :math:`C` are the fixed 20-element data
    vectors stored on the instance, and :math:`x_i \in [-10, 10]` for
    :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x_i) = 16.0817307` at
    :math:`\mathbf{x} = [0.86479, 1.2357]`.

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.global_optimum = [[0.86479, 1.2357]]
        self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
        self.fglob = 16.0817307

        # Fixed regression data for the 20 residuals.
        self.c = asarray([4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145,
                          3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179,
                          2.858, 1.388, 1.651, 1.593, 1.046, 2.152])

        self.a = asarray([0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957,
                          0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828,
                          0.399, 0.617, 0.939, 0.784, 0.072, 0.889])

        self.b = asarray([0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259,
                          0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180,
                          0.842, 0.039, 0.103, 0.620, 0.158, 0.704])

    def fun(self, x, *args):
        self.nfev += 1
        model = x[0] + self.a * x[1] + self.b * x[1] ** 2.0
        return sum((model - self.c) ** 2.0)
| 3,581
| 32.166667
| 82
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_O.py
|
# -*- coding: utf-8 -*-
from numpy import sum, cos, exp, pi, asarray
from .go_benchmark import Benchmark
class OddSquare(Benchmark):
    r"""
    Odd Square objective function.

    The Odd Square [1]_ global optimization problem is a multimodal
    minimization problem:

    .. math::

        f_{\text{OddSquare}}(x) = -e^{-\frac{d}{2\pi}} \cos(\pi d)
        \left( 1 + \frac{0.02h}{d + 0.01} \right )

    where

    .. math::

        \begin{cases}
        d = n \cdot \smash{\displaystyle\max_{1 \leq i \leq n}}
        \left[ (x_i - b_i)^2 \right ] \\
        h = \sum_{i=1}^{n} (x_i - b_i)^2
        \end{cases}

    and :math:`b = [1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4, 1,
    1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4]`.

    Here :math:`x_i \in [-5 \pi, 5 \pi]` for :math:`i = 1, ..., n` with
    :math:`n \leq 20`.

    *Global optimum*: :math:`f(x_i) = -1.0084` for :math:`x \approx b`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO The best solution changes on dimensionality
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-5.0 * pi, 5.0 * pi)] * self.N
        self.custom_bounds = ([-2.0, 4.0], [-2.0, 4.0])
        # Anchor point b; repeated so up to 20 dimensions are supported.
        self.a = asarray([1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4]
                         * 2)
        self.global_optimum = [[1.0873320463871847, 1.3873320456818079]]
        self.fglob = -1.00846728102

    def fun(self, x, *args):
        self.nfev += 1
        b = self.a[0: self.N]
        offset_sq = (x - b) ** 2.0
        d = self.N * max(offset_sq)
        h = sum(offset_sq)
        return (-exp(-d / (2.0 * pi)) * cos(pi * d)
                * (1.0 + 0.02 * h / (d + 0.01)))
| 1,919
| 29.47619
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_benchmark.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, asarray
from ..common import safe_import
with safe_import():
from scipy.special import factorial
class Benchmark:
    """
    Defines a global optimization benchmark problem.

    This abstract class defines the basic structure of a global
    optimization problem. Subclasses should implement the ``fun`` method
    for a particular optimization problem.

    Attributes
    ----------
    N : int
        The dimensionality of the problem.
    bounds : sequence
        The lower/upper bounds to be used for minimizing the problem.
        This a list of (lower, upper) tuples that contain the lower and upper
        bounds for the problem. The problem should not be asked for evaluation
        outside these bounds. ``len(bounds) == N``.
    xmin : sequence
        The lower bounds for the problem
    xmax : sequence
        The upper bounds for the problem
    fglob : float
        The global minimum of the evaluated function.
    global_optimum : sequence
        A list of vectors that provide the locations of the global minimum.
        Note that some problems have multiple global minima, not all of which
        may be listed.
    nfev : int
        the number of function evaluations that the object has been asked to
        calculate.
    change_dimensionality : bool
        Whether we can change the benchmark function `x` variable length (i.e.,
        the dimensionality of the problem)
    custom_bounds : sequence
        a list of tuples that contain lower/upper bounds for use in plotting.
    """

    def __init__(self, dimensions):
        """
        Initialises the problem

        Parameters
        ----------
        dimensions : int
            The dimensionality of the problem
        """
        self._dimensions = dimensions
        self.nfev = 0
        self.fglob = np.nan
        self.global_optimum = None
        self.change_dimensionality = False
        self.custom_bounds = None

    def __str__(self):
        return f'{self.__class__.__name__} ({self.N} dimensions)'

    def __repr__(self):
        return self.__class__.__name__

    def initial_vector(self):
        """
        Random initialisation for the benchmark problem.

        Returns
        -------
        x : sequence
            a vector of length ``N`` that contains random floating point
            numbers that lie between the lower and upper bounds for a given
            parameter.
        """
        return asarray([np.random.uniform(lo, hi) for lo, hi in self.bounds])

    def success(self, x, tol=1.e-5):
        """
        Tests if a candidate solution at the global minimum.

        Parameters
        ----------
        x : sequence
            The candidate vector for testing if the global minimum has been
            reached. Must have ``len(x) == self.N``
        tol : float
            The evaluated function and known global minimum must differ by less
            than this amount to be at a global minimum.

        Returns
        -------
        bool : is the candidate vector at the global minimum?
        """
        val = self.fun(asarray(x))
        # NOTE: the tolerance check precedes the bounds check, so a within-tol
        # value is accepted even slightly out of bounds (kept for backward
        # compatibility with existing benchmark results).
        if abs(val - self.fglob) < tol:
            return True

        # the solution should still be in bounds, otherwise immediate fail.
        # np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float)
        # is the documented equivalent.
        if np.any(x > np.asarray(self.bounds, dtype=float)[:, 1]):
            return False
        if np.any(x < np.asarray(self.bounds, dtype=float)[:, 0]):
            return False

        # you found a lower global minimum. This shouldn't happen.
        if val < self.fglob:
            raise ValueError("Found a lower global minimum",
                             x,
                             val,
                             self.fglob)

        return False

    def fun(self, x):
        """
        Evaluation of the benchmark function.

        Parameters
        ----------
        x : sequence
            The candidate vector for evaluating the benchmark problem. Must
            have ``len(x) == self.N``.

        Returns
        -------
        val : float
            the evaluated benchmark function
        """
        raise NotImplementedError

    def change_dimensions(self, ndim):
        """
        Changes the dimensionality of the benchmark problem

        The dimensionality will only be changed if the problem is suitable

        Parameters
        ----------
        ndim : int
            The new dimensionality for the problem.
        """
        if self.change_dimensionality:
            self._dimensions = ndim
        else:
            # Fixed missing space between the two string fragments
            # ("thisproblem" -> "this problem").
            raise ValueError('dimensionality cannot be changed for this '
                             'problem')

    @property
    def bounds(self):
        """
        The lower/upper bounds to be used for minimizing the problem.
        This a list of (lower, upper) tuples that contain the lower and upper
        bounds for the problem. The problem should not be asked for evaluation
        outside these bounds. ``len(bounds) == N``.
        """
        if self.change_dimensionality:
            # Dimension-flexible problems replicate the first bound pair.
            return [self._bounds[0]] * self.N
        else:
            return self._bounds

    @property
    def N(self):
        """
        The dimensionality of the problem.

        Returns
        -------
        N : int
            The dimensionality of the problem
        """
        return self._dimensions

    @property
    def xmin(self):
        """
        The lower bounds for the problem

        Returns
        -------
        xmin : sequence
            The lower bounds for the problem
        """
        return asarray([b[0] for b in self.bounds])

    @property
    def xmax(self):
        """
        The upper bounds for the problem

        Returns
        -------
        xmax : sequence
            The upper bounds for the problem
        """
        return asarray([b[1] for b in self.bounds])
| 5,925
| 27.490385
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_F.py
|
# -*- coding: utf-8 -*-
from .go_benchmark import Benchmark
class FreudensteinRoth(Benchmark):
    r"""
    FreudensteinRoth objective function.

    The Freudenstein & Roth [1]_ global optimization problem is a multimodal
    minimization problem:

    .. math::

        f_{\text{FreudensteinRoth}}(x) = \left\{x_1 - 13 + \left[(5 - x_2) x_2
        - 2 \right] x_2 \right\}^2 + \left \{x_1 - 29
        + \left[(x_2 + 1) x_2 - 14 \right] x_2 \right\}^2

    with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0` at :math:`x = [5, 4]`

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = [(-3, 3), (-5, 5)]
        self.global_optimum = [[5.0, 4.0]]
        self.fglob = 0.0

    def fun(self, x, *args):
        self.nfev += 1
        # Two squared residuals, both zero at the optimum (5, 4).
        r1 = (-13.0 + x[0] + ((5.0 - x[1]) * x[1] - 2.0) * x[1]) ** 2
        r2 = (-29.0 + x[0] + ((x[1] + 1.0) * x[1] - 14.0) * x[1]) ** 2
        return r1 + r2
| 1,329
| 28.555556
| 80
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/go_benchmark_functions/go_funcs_K.py
|
# -*- coding: utf-8 -*-
from numpy import asarray, atleast_2d, arange, sin, sqrt, prod, sum, round
from .go_benchmark import Benchmark
class Katsuura(Benchmark):
    r"""
    Katsuura objective function.

    The Katsuura [1]_ global optimization problem is a multimodal minimization
    problem:

    .. math::

        f_{\text{Katsuura}}(x) = \prod_{i=0}^{n-1} \left [ 1 +
        (i+1) \sum_{k=1}^{d} \lfloor (2^k x_i) \rfloor 2^{-k} \right ]

    where :math:`d = 32` in this implementation. Here :math:`n` is the number
    of dimensions and :math:`x_i \in [0, 100]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 1` at :math:`x_i = 0` for all :math:`i`.

    .. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
    Unconstrained Global Optimization", 2005

    .. [2] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015

    TODO: Adorio has wrong global minimum. Adorio uses round, Gavana docstring
    uses floor, but Gavana code uses round. We'll use round...
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 100.0)] * self.N
        self.global_optimum = [[0.0] * self.N]
        self.custom_bounds = [(0, 1), (0, 1)]
        self.fglob = 1.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        d = 32
        # Column vector of exponents k = 1..32, broadcast against x.
        powers = atleast_2d(arange(1, d + 1)).T
        weighted = round(2 ** powers * x) * (2. ** (-powers))
        idx = arange(0., self.N * 1.)
        return prod(sum(weighted, axis=0) * (idx + 1) + 1)
class Keane(Benchmark):
    r"""
    Keane objective function.

    The Keane [1]_ global optimization problem is a multimodal minimization
    problem:

    .. math::

        f_{\text{Keane}}(x) = \frac{\sin^2(x_1 - x_2)\sin^2(x_1 + x_2)}
        {\sqrt{x_1^2 + x_2^2}}

    with :math:`x_i \in [0, 10]` for :math:`i = 1, 2`.

    *Global optimum*: :math:`f(x) = 0.0` at
    :math:`x = [7.85396153, 7.85396135]`.

    .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
    For Global Optimization Problems Int. Journal of Mathematical Modelling
    and Numerical Optimisation, 2013, 4, 150-194.

    TODO: Jamil #69, there is no way that the function can have a negative
    value. Everything is squared. I think that they have the wrong solution.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(0.0, 10.0)] * self.N
        self.global_optimum = [[7.85396153, 7.85396135]]
        self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
        self.fglob = 0.

    def fun(self, x, *args):
        self.nfev += 1
        numerator = sin(x[0] - x[1]) ** 2 * sin(x[0] + x[1]) ** 2
        return numerator / sqrt(x[0] ** 2 + x[1] ** 2)
class Kowalik(Benchmark):
    r"""
    Kowalik objective function.

    This class defines the Kowalik [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Kowalik}}(x) = \sum_{i=0}^{10} \left [ a_i
        - \frac{x_1 (b_i^2 + b_i x_2)} {b_i^2 + b_i x_3 + x_4} \right ]^2

    Where:

    .. math::

        \begin{matrix}
        a = [0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
        0.0456, 0.0342, 0.0323, 0.0235, 0.0246] \\
        b = [4, 2, 1, 1/2, 1/4, 1/6, 1/8, 1/10, 1/12, 1/14, 1/16] \\
        \end{matrix}

    (the observed values :math:`a_i` and predictor values :math:`b_i` above
    are stored as ``self.b`` and ``self.a`` respectively in the code below).

    Here, :math:`n` represents the number of dimensions and :math:`x_i \in
    [-5, 5]` for :math:`i = 1, ..., 4`.

    *Global optimum*: :math:`f(x) = 0.00030748610` for :math:`x =
    [0.192833, 0.190836, 0.123117, 0.135766]`.

    ..[1] https://www.itl.nist.gov/div898/strd/nls/data/mgh09.shtml
    """

    def __init__(self, dimensions=4):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))

        self.global_optimum = [[0.192833, 0.190836, 0.123117, 0.135766]]
        self.fglob = 0.00030748610
        # Predictor values (11 points, decreasing).
        self.a = asarray([4.0, 2.0, 1.0, 1 / 2.0, 1 / 4.0, 1 / 6.0, 1 / 8.0,
                          1 / 10.0, 1 / 12.0, 1 / 14.0, 1 / 16.0])
        # Observed data values.
        self.b = asarray([0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
                          0.0456, 0.0342, 0.0323, 0.0235, 0.0246])

    def fun(self, x, *args):
        self.nfev += 1

        vec = self.b - (x[0] * (self.a ** 2 + self.a * x[1])
                        / (self.a ** 2 + self.a * x[2] + x[3]))
        return sum(vec ** 2)
| 4,627
| 29.853333
| 79
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/cutest/calfun.py
|
# This is a python implementation of calfun.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
from .dfovec import dfovec
def norm(x, type=2):
    """
    Vector norm of `x`.

    Parameters
    ----------
    x : array_like
        Input vector.
    type : {1, 2, numpy.inf}, optional
        Order of the norm: 1 for the sum of absolute values, 2 for the
        Euclidean norm (default), anything else is treated as the max norm.

    Returns
    -------
    float
        The requested norm of ``x``.
    """
    if type == 1:
        return np.sum(np.abs(x))
    elif type == 2:
        # BUG FIX: np.sqrt(x ** 2) is the elementwise absolute value, not
        # the Euclidean norm -- the sum over the squares was missing.
        return np.sqrt(np.sum(x ** 2))
    else:  # type==np.inf:
        return max(np.abs(x))
def calfun(x, m, nprob, probtype="smooth", noise_level=1e-3):
    """
    Evaluate a scalar objective built from the BenDFO residual vectors.

    Parameters
    ----------
    x : array_like
        Point at which to evaluate, length ``n``.
    m : int
        Number of residuals passed through to ``dfovec``.
    nprob : int
        Problem number (1..22), forwarded to ``dfovec``.
    probtype : {'smooth', 'nondiff', 'noisy3', 'wild3'}, optional
        Flavour of the objective: plain sum of squares, sum of absolute
        values, multiplicative stochastic noise, or deterministic noise.
    noise_level : float, optional
        Noise amplitude for the 'noisy3' and 'wild3' variants.

    Returns
    -------
    float or None
        Objective value (``np.inf`` instead of NaN so optimizers treat it
        as out of bounds), or None for an unknown `probtype`.
    """
    n = len(x)

    # Restrict domain for some nondiff problems.
    xc = x
    if probtype == "nondiff" and nprob in (8, 9, 13, 16, 17, 18):
        # BUG FIX: builtin max(x, 0) compares the whole array against the
        # scalar 0 (raising for n > 1); elementwise clipping is intended.
        xc = np.maximum(x, 0)

    # Generate the vector of residuals.
    fvec = dfovec(m, n, xc, nprob)

    # Calculate the function value.
    if probtype == "noisy3":
        sigma = noise_level
        u = sigma * (-np.ones(m) + 2 * np.random.rand(m))
        fvec = fvec * (1 + u)
        y = np.sum(fvec ** 2)
    elif probtype == "wild3":
        sigma = noise_level
        # Deterministic, rapidly oscillating perturbation of the smooth value.
        phi = 0.9 * np.sin(100 * norm(x, 1)) * np.cos(
            100 * norm(x, np.inf)
        ) + 0.1 * np.cos(norm(x, 2))
        phi = phi * (4 * phi ** 2 - 3)
        y = (1 + sigma * phi) * np.sum(fvec ** 2)
    elif probtype == "smooth":
        y = np.sum(fvec ** 2)
    elif probtype == "nondiff":
        y = np.sum(np.abs(fvec))
    else:
        print(f"invalid probtype {probtype}")
        return None

    # Never return nan. Return inf instead so that
    # optimization algorithms treat it as out of bounds.
    if np.isnan(y):
        return np.inf
    return y
| 1,607
| 25.8
| 61
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/cutest/dfovec.py
|
# This is a python implementation of dfovec.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
def dfovec(m, n, x, nprob):
    """
    Residual vector for a More'-Garbow-Hillstrom / BenDFO test problem.

    Python port of ``dfovec.m`` from https://github.com/POptUS/BenDFO.

    Parameters
    ----------
    m : int
        Number of residuals (must match the problem's expected size, e.g.
        m = 31 for Watson, m = 33 for Osborne 1, m = 65 for Osborne 2).
    n : int
        Number of variables, ``len(x)``.
    x : array_like
        Point at which to evaluate the residuals.
    nprob : int
        Problem number, 1..22.

    Returns
    -------
    ndarray of shape (m,), or None for an unrecognized ``nprob``.
    """
    # Set lots of constants:
    c13 = 1.3e1
    c14 = 1.4e1
    c29 = 2.9e1
    c45 = 4.5e1
    # Fixed data vectors used by the data-fitting problems below.
    v = [4.0e0, 2.0e0, 1.0e0, 5.0e-1, 2.5e-1, 1.67e-1, 1.25e-1, 1.0e-1,
         8.33e-2, 7.14e-2, 6.25e-2]
    y1 = [1.4e-1, 1.8e-1, 2.2e-1, 2.5e-1, 2.9e-1, 3.2e-1, 3.5e-1, 3.9e-1,
          3.7e-1, 5.8e-1, 7.3e-1, 9.6e-1, 1.34e0, 2.1e0, 4.39e0]
    y2 = [1.957e-1, 1.947e-1, 1.735e-1, 1.6e-1, 8.44e-2, 6.27e-2, 4.56e-2,
          3.42e-2, 3.23e-2, 2.35e-2, 2.46e-2]
    y3 = [3.478e4, 2.861e4, 2.365e4, 1.963e4, 1.637e4, 1.372e4, 1.154e4,
          9.744e3, 8.261e3, 7.03e3, 6.005e3, 5.147e3, 4.427e3, 3.82e3,
          3.307e3, 2.872e3]
    y4 = [8.44e-1, 9.08e-1, 9.32e-1, 9.36e-1, 9.25e-1, 9.08e-1, 8.81e-1,
          8.5e-1, 8.18e-1, 7.84e-1, 7.51e-1, 7.18e-1, 6.85e-1, 6.58e-1,
          6.28e-1, 6.03e-1, 5.8e-1, 5.58e-1, 5.38e-1, 5.22e-1, 5.06e-1,
          4.9e-1, 4.78e-1, 4.67e-1, 4.57e-1, 4.48e-1, 4.38e-1, 4.31e-1,
          4.24e-1, 4.2e-1, 4.14e-1, 4.11e-1, 4.06e-1]
    y5 = [1.366e0, 1.191e0, 1.112e0, 1.013e0, 9.91e-1, 8.85e-1, 8.31e-1,
          8.47e-1, 7.86e-1, 7.25e-1, 7.46e-1, 6.79e-1, 6.08e-1, 6.55e-1,
          6.16e-1, 6.06e-1, 6.02e-1, 6.26e-1, 6.51e-1, 7.24e-1, 6.49e-1,
          6.49e-1, 6.94e-1, 6.44e-1, 6.24e-1, 6.61e-1, 6.12e-1, 5.58e-1,
          5.33e-1, 4.95e-1, 5.0e-1, 4.23e-1, 3.95e-1, 3.75e-1, 3.72e-1,
          3.91e-1, 3.96e-1, 4.05e-1, 4.28e-1, 4.29e-1, 5.23e-1, 5.62e-1,
          6.07e-1, 6.53e-1, 6.72e-1, 7.08e-1, 6.33e-1, 6.68e-1, 6.45e-1,
          6.32e-1, 5.91e-1, 5.59e-1, 5.97e-1, 6.25e-1, 7.39e-1, 7.1e-1,
          7.29e-1, 7.2e-1, 6.36e-1, 5.81e-1, 4.28e-1, 2.92e-1, 1.62e-1,
          9.8e-2, 5.4e-2]

    # Initialize things
    fvec = np.zeros(m)
    total = 0

    if nprob == 1:  # Linear function - full rank.
        for j in range(n):
            total = total + x[j]
        temp = 2 * total / m + 1
        for i in range(m):
            fvec[i] = -temp
            if i < n:
                fvec[i] = fvec[i] + x[i]
    elif nprob == 2:  # Linear function - rank 1.
        for j in range(n):
            total = total + (j + 1) * x[j]
        for i in range(m):
            fvec[i] = (i + 1) * total - 1
    elif nprob == 3:  # Linear function - rank 1 with zero columns and rows.
        for j in range(1, n - 1):
            total = total + (j + 1) * x[j]
        for i in range(m - 1):
            fvec[i] = i * total - 1
        fvec[m - 1] = -1
    elif nprob == 4:  # Rosenbrock function.
        fvec[0] = 10 * (x[1] - x[0] * x[0])
        fvec[1] = 1 - x[0]
    elif nprob == 5:  # Helical valley function.
        if x[0] > 0:
            th = np.arctan(x[1] / x[0]) / (2 * np.pi)
        elif x[0] < 0:
            th = np.arctan(x[1] / x[0]) / (2 * np.pi) + 0.5
        elif x[0] == x[1] and x[1] == 0:
            th = 0.0
        else:
            th = 0.25
        r = np.sqrt(x[0] * x[0] + x[1] * x[1])
        fvec[0] = 10 * (x[2] - 10 * th)
        fvec[1] = 10 * (r - 1)
        fvec[2] = x[2]
    elif nprob == 6:  # Powell singular function.
        fvec[0] = x[0] + 10 * x[1]
        fvec[1] = np.sqrt(5) * (x[2] - x[3])
        fvec[2] = (x[1] - 2 * x[2]) ** 2
        fvec[3] = np.sqrt(10) * (x[0] - x[3]) ** 2
    elif nprob == 7:  # Freudenstein and Roth function.
        fvec[0] = -c13 + x[0] + ((5 - x[1]) * x[1] - 2) * x[1]
        fvec[1] = -c29 + x[0] + ((1 + x[1]) * x[1] - c14) * x[1]
    elif nprob == 8:  # Bard function.
        for i in range(15):
            tmp1 = i + 1
            tmp2 = 15 - i
            tmp3 = tmp1
            if i > 7:
                tmp3 = tmp2
            fvec[i] = y1[i] - (x[0] + tmp1 / (x[1] * tmp2 + x[2] * tmp3))
    elif nprob == 9:  # Kowalik and Osborne function.
        for i in range(11):
            tmp1 = v[i] * (v[i] + x[1])
            tmp2 = v[i] * (v[i] + x[2]) + x[3]
            fvec[i] = y2[i] - x[0] * tmp1 / tmp2
    elif nprob == 10:  # Meyer function.
        for i in range(16):
            temp = 5 * (i + 1) + c45 + x[2]
            tmp1 = x[1] / temp
            tmp2 = np.exp(tmp1)
            fvec[i] = x[0] * tmp2 - y3[i]
    elif nprob == 11:  # Watson function (requires m = 31).
        for i in range(29):
            div = (i + 1) / c29
            s1 = 0
            dx = 1
            for j in range(1, n):
                s1 = s1 + j * dx * x[j]
                dx = div * dx
            s2 = 0
            dx = 1
            for j in range(n):
                s2 = s2 + dx * x[j]
                dx = div * dx
            fvec[i] = s1 - s2 * s2 - 1
        fvec[29] = x[0]
        fvec[30] = x[1] - x[0] * x[0] - 1
    elif nprob == 12:  # Box 3-dimensional function.
        for i in range(m):
            temp = i + 1
            tmp1 = temp / 10
            fvec[i] = (
                np.exp(-tmp1 * x[0])
                - np.exp(-tmp1 * x[1])
                + (np.exp(-temp) - np.exp(-tmp1)) * x[2]
            )
    elif nprob == 13:  # Jennrich and Sampson function.
        for i in range(m):
            temp = i + 1
            fvec[i] = 2 + 2 * temp - np.exp(temp * x[0]) - np.exp(temp * x[1])
    elif nprob == 14:  # Brown and Dennis function.
        for i in range(m):
            temp = (i + 1) / 5
            tmp1 = x[0] + temp * x[1] - np.exp(temp)
            tmp2 = x[2] + np.sin(temp) * x[3] - np.cos(temp)
            fvec[i] = tmp1 * tmp1 + tmp2 * tmp2
    elif nprob == 15:  # Chebyquad function.
        # Chebyshev polynomial recurrence evaluated at the shifted points.
        for j in range(n):
            t1 = 1
            t2 = 2 * x[j] - 1
            t = 2 * t2
            for i in range(m):
                fvec[i] = fvec[i] + t2
                th = t * t2 - t1
                t1 = t2
                t2 = th
        iev = -1
        for i in range(m):
            fvec[i] = fvec[i] / n
            if iev > 0:
                fvec[i] = fvec[i] + 1 / ((i + 1) ** 2 - 1)
            iev = -iev
    elif nprob == 16:  # Brown almost-linear function.
        total1 = -(n + 1)
        prod1 = 1
        for j in range(n):
            total1 = total1 + x[j]
            prod1 = x[j] * prod1
        for i in range(n - 1):
            fvec[i] = x[i] + total1
        fvec[n - 1] = prod1 - 1
    elif nprob == 17:  # Osborne 1 function (m = 33).
        for i in range(33):
            temp = 10 * i
            tmp1 = np.exp(-x[3] * temp)
            tmp2 = np.exp(-x[4] * temp)
            fvec[i] = y4[i] - (x[0] + x[1] * tmp1 + x[2] * tmp2)
    elif nprob == 18:  # Osborne 2 function (m = 65).
        for i in range(65):
            temp = i / 10
            tmp1 = np.exp(-x[4] * temp)
            tmp2 = np.exp(-x[5] * (temp - x[8]) ** 2)
            tmp3 = np.exp(-x[6] * (temp - x[9]) ** 2)
            tmp4 = np.exp(-x[7] * (temp - x[10]) ** 2)
            fvec[i] = y5[i] - (x[0] * tmp1 + x[1] * tmp2 + x[2] * tmp3 + x[3] * tmp4)  # noqa
    elif nprob == 19:  # Bdqrtic
        # n >= 5, m = (n-4)*2
        for i in range(n - 4):
            fvec[i] = -4 * x[i] + 3.0
            fvec[n - 4 + i] = (
                x[i] ** 2
                + 2 * x[i + 1] ** 2
                + 3 * x[i + 2] ** 2
                + 4 * x[i + 3] ** 2
                + 5 * x[n - 1] ** 2
            )
    elif nprob == 20:  # Cube
        # n = 2, m = n
        # BUG FIX: the first residual belongs in fvec[0] (``fvec(1)`` in
        # dfovec.m). Writing it to fvec[1] was immediately overwritten by
        # the loop below, leaving fvec[0] == 0.
        fvec[0] = x[0] - 1.0
        for i in range(1, n):
            fvec[i] = 10 * (x[i] - x[i - 1] ** 3)
    elif nprob == 21:  # Mancino
        # n = 2, m = n
        for i in range(n):
            ss = 0
            for j in range(n):
                v2 = np.sqrt(x[i] ** 2 + (i + 1) / (j + 1))
                ss = ss + v2 * ((np.sin(np.log(v2))) ** 5 + (np.cos(np.log(v2))) ** 5)  # noqa
            fvec[i] = 1400 * x[i] + (i - 49) ** 3 + ss
    elif nprob == 22:  # Heart8ls
        # m = n = 8
        fvec[0] = x[0] + x[1] + 0.69
        fvec[1] = x[2] + x[3] + 0.044
        fvec[2] = x[4] * x[0] + x[5] * x[1] - x[6] * x[2] - x[7] * x[3] + 1.57
        fvec[3] = x[6] * x[0] + x[7] * x[1] + x[4] * x[2] + x[5] * x[3] + 1.31
        fvec[4] = (
            x[0] * (x[4] ** 2 - x[6] ** 2)
            - 2.0 * x[2] * x[4] * x[6]
            + x[1] * (x[5] ** 2 - x[7] ** 2)
            - 2.0 * x[3] * x[5] * x[7]
            + 2.65
        )
        fvec[5] = (
            x[2] * (x[4] ** 2 - x[6] ** 2)
            + 2.0 * x[0] * x[4] * x[6]
            + x[3] * (x[5] ** 2 - x[7] ** 2)
            + 2.0 * x[1] * x[5] * x[7]
            - 2.0
        )
        fvec[6] = (
            x[0] * x[4] * (x[4] ** 2 - 3.0 * x[6] ** 2)
            + x[2] * x[6] * (x[6] ** 2 - 3.0 * x[4] ** 2)
            + x[1] * x[5] * (x[5] ** 2 - 3.0 * x[7] ** 2)
            + x[3] * x[7] * (x[7] ** 2 - 3.0 * x[5] ** 2)
            + 12.6
        )
        fvec[7] = (
            x[2] * x[4] * (x[4] ** 2 - 3.0 * x[6] ** 2)
            - x[0] * x[6] * (x[6] ** 2 - 3.0 * x[4] ** 2)
            + x[3] * x[5] * (x[5] ** 2 - 3.0 * x[7] ** 2)
            # NOTE(review): the symmetric pattern of the lines above suggests
            # x[5] ** 2 in the final factor; verify against dfovec.m before
            # changing -- kept as in the original port.
            - x[1] * x[7] * (x[7] ** 2 - 3.0 * x[6] ** 2)
            - 9.48
        )
    else:
        print(f"unrecognized function number {nprob}")
        return None
    return fvec
| 10,188
| 25.955026
| 94
|
py
|
scipy
|
scipy-main/benchmarks/benchmarks/cutest/dfoxs.py
|
# This is a python implementation of dfoxs.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
def dfoxs(n, nprob, factor):
    """
    Standard starting point, scaled by `factor`, for BenDFO problem `nprob`.

    Python port of ``dfoxs.m`` from https://github.com/POptUS/BenDFO.

    Parameters
    ----------
    n : int
        Number of variables.
    nprob : int
        Problem number, 1..22.
    factor : float
        Multiplier applied to the standard starting point.

    Returns
    -------
    ndarray of shape (n,), or None for an unrecognized ``nprob``.
    """
    x = np.zeros(n)

    if nprob in (1, 2, 3):  # Linear functions.
        x[:] = 1
    elif nprob == 4:  # Rosenbrock function.
        x[:2] = [-1.2, 1]
    elif nprob == 5:  # Helical valley function.
        x[0] = -1
    elif nprob == 6:  # Powell singular function.
        x[:4] = [3, -1, 0, 1]
    elif nprob == 7:  # Freudenstein and Roth function.
        x[:2] = [0.5, -2]
    elif nprob == 8:  # Bard function.
        x[:3] = 1
    elif nprob == 9:  # Kowalik and Osborne function.
        x[:4] = [0.25, 0.39, 0.415, 0.39]
    elif nprob == 10:  # Meyer function.
        x[:3] = [0.02, 4000, 250]
    elif nprob == 11:  # Watson function.
        x[:] = 0.5
    elif nprob == 12:  # Box 3-dimensional function.
        x[:3] = [0, 10, 20]
    elif nprob == 13:  # Jennrich and Sampson function.
        x[:2] = [0.3, 0.4]
    elif nprob == 14:  # Brown and Dennis function.
        x[:4] = [25, 5, -5, -1]
    elif nprob == 15:  # Chebyquad function.
        x[:] = np.arange(1, n + 1) / (n + 1)
    elif nprob == 16:  # Brown almost-linear function.
        x[:] = 0.5
    elif nprob == 17:  # Osborne 1 function.
        x[:5] = [0.5, 1.5, 1, 0.01, 0.02]
    elif nprob == 18:  # Osborne 2 function.
        x[:11] = [1.3, 0.65, 0.65, 0.7, 0.6, 3, 5, 7, 2, 4.5, 5.5]
    elif nprob == 19:  # Bdqrtic.
        x[:] = 1
    elif nprob == 20:  # Cube.
        x[:] = 0.5
    elif nprob == 21:  # Mancino.
        for i in range(n):
            ss = 0
            for j in range(n):
                frac = (i + 1) / (j + 1)
                ss = ss + np.sqrt(frac) * (
                    (np.sin(np.log(np.sqrt(frac)))) ** 5
                    + (np.cos(np.log(np.sqrt(frac)))) ** 5
                )
            x[i] = -8.710996e-4 * ((i - 49) ** 3 + ss)
    elif nprob == 22:  # Heart8ls.
        x[:8] = [-0.3, -0.39, 0.3, -0.344, -1.2, 2.69, 1.59, -1.5]
    else:
        print(f"unrecognized function number {nprob}")
        return None

    return factor * x
| 2,637
| 26.768421
| 74
|
py
|
scipy
|
scipy-main/scipy/conftest.py
|
# Pytest customization
import json
import os
import warnings
import numpy as np
import numpy.array_api
import numpy.testing as npt
import pytest
from scipy._lib._fpumode import get_fpu_mode
from scipy._lib._testutils import FPUModeChangeWarning
from scipy._lib import _pep440
from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE
def pytest_configure(config):
    """Register SciPy's custom pytest markers with the test runner."""
    marker_lines = [
        "slow: Tests that are very slow.",
        "xslow: mark test as extremely slow (not run unless explicitly requested)",
        "xfail_on_32bit: mark test as failing on 32-bit platforms",
    ]
    for line in marker_lines:
        config.addinivalue_line("markers", line)
    # Register a fallback 'timeout' marker only when pytest-timeout is absent,
    # so using the marker never triggers an unknown-marker warning.
    try:
        import pytest_timeout  # noqa:F401
    except Exception:
        config.addinivalue_line(
            "markers", 'timeout: mark a test for a non-default timeout')
def _get_mark(item, name):
    """Return the marker `name` from a test item across pytest versions."""
    # pytest 3.6 replaced get_marker() with get_closest_marker().
    modern_pytest = (_pep440.parse(pytest.__version__)
                     >= _pep440.Version("3.6.0"))
    if modern_pytest:
        return item.get_closest_marker(name)
    return item.get_marker(name)
def pytest_runtest_setup(item):
    """Per-test setup hook.

    Skips tests marked ``xslow`` unless the environment variable
    SCIPY_XSLOW is set to a nonzero integer, xfails tests marked
    ``xfail_on_32bit`` on 32-bit platforms, and caps the number of BLAS
    threads per pytest-xdist worker to avoid CPU oversubscription.
    """
    mark = _get_mark(item, "xslow")
    if mark is not None:
        try:
            v = int(os.environ.get('SCIPY_XSLOW', '0'))
        except ValueError:
            # Non-integer values disable xslow tests, same as unset.
            v = False
        if not v:
            pytest.skip("very slow test; set environment variable SCIPY_XSLOW=1 to run it")
    mark = _get_mark(item, 'xfail_on_32bit')
    if mark is not None and np.intp(0).itemsize < 8:
        # np.intp is 4 bytes on 32-bit platforms; mark.args[0] carries the reason.
        pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}')

    # Older versions of threadpoolctl have an issue that may lead to this
    # warning being emitted, see gh-14441
    with npt.suppress_warnings() as sup:
        sup.filter(pytest.PytestUnraisableExceptionWarning)

        try:
            from threadpoolctl import threadpool_limits

            HAS_THREADPOOLCTL = True
        except Exception:  # observed in gh-14441: (ImportError, AttributeError)
            # Optional dependency only. All exceptions are caught, for robustness
            HAS_THREADPOOLCTL = False

        if HAS_THREADPOOLCTL:
            # Set the number of openmp threads based on the number of workers
            # xdist is using to prevent oversubscription. Simplified version of what
            # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper
            # functions)
            try:
                xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
            except KeyError:
                # raises when pytest-xdist is not installed
                return

            if not os.getenv('OMP_NUM_THREADS'):
                max_openmp_threads = os.cpu_count() // 2  # use nr of physical cores
                threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1)
                try:
                    threadpool_limits(threads_per_worker, user_api='blas')
                except Exception:
                    # May raise AttributeError for older versions of OpenBLAS.
                    # Catch any error for robustness.
                    return
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """Warn if a test leaves the FPU control word in a changed state."""
    mode_before = get_fpu_mode()
    yield
    mode_after = get_fpu_mode()
    if mode_before == mode_after:
        return
    warnings.warn(f"FPU mode changed from {mode_before:#x} to "
                  f"{mode_after:#x} during the test",
                  category=FPUModeChangeWarning, stacklevel=0)
# Array API backend handling.
# SCIPY_ARRAY_API may be "1"/"true" (enable all available backends) or a
# JSON list naming a subset of backends to test against.
xp_available_backends = {'numpy': np}
if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str):
    # fill the dict of backends with available libraries
    xp_available_backends.update({'numpy.array_api': numpy.array_api})
    try:
        import torch  # type: ignore[import]
        xp_available_backends.update({'pytorch': torch})
        # can use `mps` or `cpu`
        torch.set_default_device(SCIPY_DEVICE)
    except ImportError:
        pass
    try:
        import cupy  # type: ignore[import]
        xp_available_backends.update({'cupy': cupy})
    except ImportError:
        pass
    # by default, use all available backends
    if SCIPY_ARRAY_API.lower() not in ("1", "true"):
        SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API)
        if 'all' in SCIPY_ARRAY_API_:
            pass  # same as True
        else:
            # only select a subset of backend by filtering out the dict
            try:
                xp_available_backends = {
                    backend: xp_available_backends[backend]
                    for backend in SCIPY_ARRAY_API_
                }
            except KeyError:
                # A requested backend name is unknown or not installed.
                msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}"
                raise ValueError(msg)
if 'cupy' in xp_available_backends:
    # CuPy arrays live on the GPU, so force the CUDA device.
    SCIPY_DEVICE = 'cuda'
# Parametrize a test over every selected backend module.
array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values())
skip_if_array_api = pytest.mark.skipif(
    SCIPY_ARRAY_API,
    reason="do not run with Array API on",
)
skip_if_array_api_gpu = pytest.mark.skipif(
    SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu',
    reason="do not run with Array API on and not on CPU",
)
def skip_if_array_api_backend(backend):
    """Decorator factory: skip the wrapped test when run on `backend`.

    Handles both plain test functions and test methods; the method case is
    detected through ``__qualname__`` because the decorator receives an
    unbound function, so ``inspect.ismethod`` would not work.
    """
    skip_reason = f"do not run with Array API backend: {backend}"

    def wrapper(func):
        is_method = '.' in func.__qualname__

        if is_method:
            def wrapped(self, *args, xp, **kwargs):
                if xp.__name__ == backend:
                    pytest.skip(reason=skip_reason)
                return func(self, *args, xp, **kwargs)
        else:
            def wrapped(*args, xp, **kwargs):  # type: ignore[misc]
                if xp.__name__ == backend:
                    pytest.skip(reason=skip_reason)
                return func(*args, xp, **kwargs)
        return wrapped

    return wrapper
| 5,991
| 33.436782
| 91
|
py
|
scipy
|
scipy-main/scipy/setup.py
|
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration for the top-level `scipy` package.

    Registers every public subpackage, ships the Cython ``*.pxd`` headers,
    and generates ``__config__.py``.
    """
    from numpy.distutils.system_info import get_info
    # Result is unused; presumably probes LAPACK availability early so a
    # missing BLAS/LAPACK is reported up front — TODO confirm.
    get_info("lapack_opt")
    from numpy.distutils.misc_util import Configuration
    config = Configuration('scipy',parent_package,top_path)
    config.add_subpackage('_lib')
    config.add_subpackage('cluster')
    config.add_subpackage('constants')
    config.add_subpackage('datasets')
    config.add_subpackage('fft')
    config.add_subpackage('fftpack')
    config.add_subpackage('integrate')
    config.add_subpackage('interpolate')
    config.add_subpackage('io')
    config.add_subpackage('linalg')
    # Install the Cython declaration files alongside the package.
    config.add_data_files('*.pxd')
    config.add_subpackage('misc')
    config.add_subpackage('odr')
    config.add_subpackage('optimize')
    config.add_subpackage('signal')
    config.add_subpackage('sparse')
    config.add_subpackage('spatial')
    config.add_subpackage('special')
    config.add_subpackage('stats')
    config.add_subpackage('ndimage')
    config.add_subpackage('_build_utils')
    config.make_config_py()
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 1,182
| 32.8
| 59
|
py
|
scipy
|
scipy-main/scipy/_distributor_init.py
|
""" Distributor init file
Distributors: you can add custom code here to support particular distributions
of SciPy.
For example, this is a good place to put any checks for hardware requirements.
The SciPy standard source distribution will not put code in this file, so you
can safely replace this file with your own version.
"""
| 331
| 29.181818
| 78
|
py
|
scipy
|
scipy-main/scipy/__init__.py
|
"""
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at https://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
constants --- Physical and mathematical constants and units
datasets --- Dataset methods
fft --- Discrete Fourier transforms
fftpack --- Legacy discrete Fourier transforms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
misc --- Utilities that don't have another home.
ndimage --- N-D image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
sparse --- Sparse Matrices
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Public API in the main SciPy namespace
--------------------------------------
::
__version__ --- SciPy version string
LowLevelCallable --- Low-level callback function
show_config --- Show scipy build configuration
test --- Run scipy unittests
"""
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
    raise ImportError(
        "Cannot import SciPy when running from NumPy source directory.")
from numpy import __version__ as __numpy_version__

# Import numpy symbols to scipy name space (DEPRECATED)
from ._lib.deprecation import _deprecated
import numpy as np
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
        'use numpy.{0} instead')

# deprecate callable objects from numpy, skipping classes and modules
import types as _types  # noqa: E402
for _key in np.__all__:
    if _key.startswith('_'):
        continue
    _fun = getattr(np, _key)
    if isinstance(_fun, _types.ModuleType):
        continue
    if callable(_fun) and not isinstance(_fun, type):
        # Wrap each callable so use emits a DeprecationWarning.
        _fun = _deprecated(_msg.format(_key))(_fun)
    globals()[_key] = _fun
del np, _types

from numpy.random import rand, randn
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
        'use numpy.random.{0} instead')
rand = _deprecated(_msg.format('rand'))(rand)
randn = _deprecated(_msg.format('randn'))(randn)

# fft is especially problematic, so was removed in SciPy 1.6.0
from numpy.fft import ifft
ifft = _deprecated('scipy.ifft is deprecated and will be removed in SciPy '
                   '2.0.0, use scipy.fft.ifft instead')(ifft)

from numpy.lib import scimath  # noqa: E402
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
        'use numpy.lib.scimath.{0} instead')
for _key in scimath.__all__:
    _fun = getattr(scimath, _key)
    if callable(_fun):
        _fun = _deprecated(_msg.format(_key))(_fun)
    globals()[_key] = _fun
del scimath
# Clean the loop temporaries out of the scipy namespace.
del _msg, _fun, _key, _deprecated
# We first need to detect if we're being called as part of the SciPy
# setup procedure itself in a reliable manner.
try:
    __SCIPY_SETUP__
except NameError:
    # Normal import: the setup machinery did not pre-define the flag.
    __SCIPY_SETUP__ = False

if __SCIPY_SETUP__:
    import sys
    sys.stderr.write('Running from SciPy source directory.\n')
    del sys
else:
    try:
        from scipy.__config__ import show as show_config
    except ImportError as e:
        # __config__.py only exists in a built/installed scipy.
        msg = """Error importing SciPy: you cannot import SciPy while
        being in scipy source directory; please exit the SciPy source
        tree first and relaunch your Python interpreter."""
        raise ImportError(msg) from e

    from scipy.version import version as __version__

    # Allow distributors to run custom init code
    from . import _distributor_init
    del _distributor_init

    from scipy._lib import _pep440
    # In maintenance branch, change to np_maxversion N+3 if numpy is at N
    # See setup.py for more details
    np_minversion = '1.22.4'
    np_maxversion = '9.9.99'
    if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or
            _pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)):
        import warnings
        warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
                      f" is required for this version of SciPy (detected "
                      f"version {__numpy_version__})",
                      UserWarning)
    del _pep440

    # This is the first import of an extension module within SciPy. If there's
    # a general issue with the install, such that extension modules are missing
    # or cannot be imported, this is where we'll get a failure - so give an
    # informative error message.
    try:
        from scipy._lib._ccallback import LowLevelCallable
    except ImportError as e:
        msg = "The `scipy` install you are using seems to be broken, " + \
              "(extension modules cannot be imported), " + \
              "please try reinstalling."
        raise ImportError(msg) from e

    from scipy._lib._testutils import PytestTester
    test = PytestTester(__name__)
    del PytestTester
# Public subpackages; imported lazily on first access via __getattr__ below.
submodules = [
    'cluster',
    'constants',
    'datasets',
    'fft',
    'fftpack',
    'integrate',
    'interpolate',
    'io',
    'linalg',
    'misc',
    'ndimage',
    'odr',
    'optimize',
    'signal',
    'sparse',
    'spatial',
    'special',
    'stats'
]

__all__ = submodules + [
    'LowLevelCallable',
    'test',
    'show_config',
    '__version__',
]


def __dir__():
    # Advertise only the public API in dir(scipy).
    return __all__
import importlib as _importlib
def __getattr__(name):
    """Lazily import subpackages on first attribute access (PEP 562)."""
    if name in submodules:
        # Subpackages are imported on demand instead of eagerly at startup.
        return _importlib.import_module(f'scipy.{name}')
    try:
        return globals()[name]
    except KeyError:
        raise AttributeError(
            f"Module 'scipy' has no attribute '{name}'"
        )
| 6,530
| 32.152284
| 80
|
py
|
scipy
|
scipy-main/scipy/integrate/lsoda.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from . import _lsoda # type: ignore
# Names forwarded from the private `_lsoda` module during deprecation.
__all__ = ['lsoda']  # noqa: F822


def __dir__():
    # Expose only the deprecated public names.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``_lsoda`` with a warning."""
    if name in __all__:
        warnings.warn("The `scipy.integrate.lsoda` namespace is deprecated "
                      "and will be removed in SciPy v2.0.0.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_lsoda, name)
    raise AttributeError(
        "scipy.integrate.lsoda is deprecated and has no attribute "
        f"{name}.")
| 610
| 22.5
| 76
|
py
|
scipy
|
scipy-main/scipy/integrate/odepack.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.integrate` namespace for importing the functions
# included below.
import warnings
from . import _odepack_py
# Names forwarded from `_odepack_py` during deprecation.
__all__ = ['odeint', 'ODEintWarning']  # noqa: F822


def __dir__():
    # Expose only the deprecated public names.
    return __all__
def __getattr__(name):
    """Forward deprecated attribute access to ``_odepack_py`` with a warning."""
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.integrate` namespace, "
                      "the `scipy.integrate.odepack` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_odepack_py, name)
    raise AttributeError(
        "scipy.integrate.odepack is deprecated and has no attribute "
        f"{name}. Try looking in scipy.integrate instead.")
| 771
| 28.692308
| 79
|
py
|
scipy
|
scipy-main/scipy/integrate/setup.py
|
import os
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration for `scipy.integrate`.

    Builds the Fortran helper libraries (mach, quadpack, lsoda, vode, dop)
    and the extension modules wrapping them, with optional ILP64
    (64-bit integer) BLAS/LAPACK support.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info
    from scipy._build_utils import (uses_blas64, blas_ilp64_pre_build_hook,
                                    combine_dict, get_f2py_int64_options)

    config = Configuration('integrate', parent_package, top_path)

    # Select ILP64 LAPACK and matching f2py options when requested.
    if uses_blas64():
        lapack_opt = get_info('lapack_ilp64_opt', 2)
        pre_build_hook = blas_ilp64_pre_build_hook(lapack_opt)
        f2py_options = get_f2py_int64_options()
    else:
        lapack_opt = get_info('lapack_opt')
        pre_build_hook = None
        f2py_options = None

    # Fortran source lists for the bundled libraries.
    mach_src = [join('mach','*.f')]
    quadpack_src = [join('quadpack', '*.f')]
    lsoda_src = [join('odepack', fn) for fn in [
        'blkdta000.f', 'bnorm.f', 'cfode.f',
        'ewset.f', 'fnorm.f', 'intdy.f',
        'lsoda.f', 'prja.f', 'solsy.f', 'srcma.f',
        'stoda.f', 'vmnorm.f', 'xerrwv.f', 'xsetf.f',
        'xsetun.f']]
    vode_src = [join('odepack', 'vode.f'), join('odepack', 'zvode.f')]
    dop_src = [join('dop','*.f')]
    quadpack_test_src = [join('tests','_test_multivariate.c')]
    odeint_banded_test_src = [join('tests', 'banded5x5.f')]

    # mach is compiled without optimization (config_fc 'noopt').
    config.add_library('mach', sources=mach_src, config_fc={'noopt': (__file__, 1)},
                       _pre_build_hook=pre_build_hook)
    config.add_library('quadpack', sources=quadpack_src, _pre_build_hook=pre_build_hook)
    config.add_library('lsoda', sources=lsoda_src, _pre_build_hook=pre_build_hook)
    config.add_library('vode', sources=vode_src, _pre_build_hook=pre_build_hook)
    config.add_library('dop', sources=dop_src, _pre_build_hook=pre_build_hook)

    # Extensions
    # quadpack:
    include_dirs = [join(os.path.dirname(__file__), '..', '_lib', 'src')]
    cfg = combine_dict(lapack_opt,
                       include_dirs=include_dirs,
                       libraries=['quadpack', 'mach'])
    config.add_extension('_quadpack',
                         sources=['_quadpackmodule.c'],
                         depends=(['__quadpack.h']
                                  + quadpack_src + mach_src),
                         **cfg)
    # odepack/lsoda-odeint
    cfg = combine_dict(lapack_opt, numpy_nodepr_api,
                       libraries=['lsoda', 'mach'])
    config.add_extension('_odepack',
                         sources=['_odepackmodule.c'],
                         depends=(lsoda_src + mach_src),
                         **cfg)
    # vode
    cfg = combine_dict(lapack_opt,
                       libraries=['vode'])
    ext = config.add_extension('_vode',
                               sources=['vode.pyf'],
                               depends=vode_src,
                               f2py_options=f2py_options,
                               **cfg)
    ext._pre_build_hook = pre_build_hook
    # lsoda
    cfg = combine_dict(lapack_opt,
                       libraries=['lsoda', 'mach'])
    ext = config.add_extension('_lsoda',
                               sources=['lsoda.pyf'],
                               depends=(lsoda_src + mach_src),
                               f2py_options=f2py_options,
                               **cfg)
    ext._pre_build_hook = pre_build_hook
    # dop
    ext = config.add_extension('_dop',
                               sources=['dop.pyf'],
                               libraries=['dop'],
                               depends=dop_src,
                               f2py_options=f2py_options)
    ext._pre_build_hook = pre_build_hook
    config.add_extension('_test_multivariate',
                         sources=quadpack_test_src)
    # Fortran+f2py extension module for testing odeint.
    cfg = combine_dict(lapack_opt,
                       libraries=['lsoda', 'mach'])
    ext = config.add_extension('_test_odeint_banded',
                               sources=odeint_banded_test_src,
                               depends=(lsoda_src + mach_src),
                               f2py_options=f2py_options,
                               **cfg)
    ext._pre_build_hook = pre_build_hook

    config.add_subpackage('_ivp')
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 4,440
| 37.95614
| 88
|
py
|
scipy
|
scipy-main/scipy/integrate/_bvp.py
|
"""Boundary value problem solver."""
from warnings import warn
import numpy as np
from numpy.linalg import pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps


def estimate_fun_jac(fun, x, y, p, f0=None):
    """Estimate derivatives of an ODE system rhs with forward differences.

    Parameters
    ----------
    fun : callable
        Right-hand side, evaluated as ``fun(x, y, p)``.
    x : ndarray, shape (m,)
        Mesh nodes.
    y : ndarray, shape (n, m)
        Function values at the nodes.
    p : ndarray, shape (k,)
        Unknown parameters.
    f0 : ndarray, shape (n, m), optional
        Precomputed ``fun(x, y, p)``; recomputed when omitted.

    Returns
    -------
    df_dy : ndarray, shape (n, n, m)
        Derivatives with respect to y. Element (i, j, q) corresponds to
        d f_i(x_q, y_q) / d (y_q)_j.
    df_dp : ndarray with shape (n, k, m) or None
        Derivatives with respect to p. Element (i, j, q) corresponds to
        d f_i(x_q, y_q, p) / d p_j. None when `p` is empty.
    """
    n, m = y.shape
    if f0 is None:
        f0 = fun(x, y, p)

    dtype = y.dtype

    # Perturb one y-component at a time; step scales with the magnitude.
    step_y = EPS**0.5 * (1 + np.abs(y))
    df_dy = np.empty((n, n, m), dtype=dtype)
    for comp in range(n):
        y_pert = y.copy()
        y_pert[comp] += step_y[comp]
        actual_h = y_pert[comp] - y[comp]
        df_dy[:, comp, :] = (fun(x, y_pert, p) - f0) / actual_h

    k = p.shape[0]
    if k == 0:
        return df_dy, None

    df_dp = np.empty((n, k, m), dtype=dtype)
    step_p = EPS**0.5 * (1 + np.abs(p))
    for comp in range(k):
        p_pert = p.copy()
        p_pert[comp] += step_p[comp]
        actual_h = p_pert[comp] - p[comp]
        df_dp[:, comp, :] = (fun(x, y, p_pert) - f0) / actual_h

    return df_dy, df_dp
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
    """Estimate derivatives of boundary conditions with forward differences.

    Returns
    -------
    dbc_dya : ndarray, shape (n + k, n)
        Derivatives with respect to ya. Element (i, j) corresponds to
        d bc_i / d ya_j.
    dbc_dyb : ndarray, shape (n + k, n)
        Derivatives with respect to yb, laid out the same way.
    dbc_dp : ndarray with shape (n + k, k) or None
        Derivatives with respect to p; None when `p` is empty.
    """
    n = ya.shape[0]
    k = p.shape[0]
    if bc0 is None:
        bc0 = bc(ya, yb, p)

    rt_eps = np.finfo(float).eps ** 0.5  # forward-difference step scale
    dtype = ya.dtype

    def _forward_diff(base, apply_bc):
        # Differentiate bc w.r.t. each component of `base`; row `comp`
        # holds the n + k residual derivatives, then transpose so columns
        # correspond to the perturbed components.
        out = np.empty((base.shape[0], n + k), dtype=dtype)
        steps = rt_eps * (1 + np.abs(base))
        for comp in range(base.shape[0]):
            pert = base.copy()
            pert[comp] += steps[comp]
            actual_h = pert[comp] - base[comp]
            out[comp] = (apply_bc(pert) - bc0) / actual_h
        return out.T

    dbc_dya = _forward_diff(ya, lambda v: bc(v, yb, p))
    dbc_dyb = _forward_diff(yb, lambda v: bc(ya, v, p))
    dbc_dp = None if k == 0 else _forward_diff(p, lambda v: bc(ya, yb, v))
    return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
    """Compute row and column indices for the collocation-system Jacobian.

    See `construct_global_jac` for an explanation of the block layout that
    these indices follow.
    """
    n_col = (m - 1) * n  # number of collocation residuals
    n_bc = n + k         # number of boundary-condition residuals

    # Diagonal / off-diagonal collocation blocks (share row indices).
    rows_col = np.repeat(np.arange(n_col), n)
    cols_col = (np.tile(np.arange(n), n * (m - 1)) +
                np.repeat(np.arange(m - 1) * n, n**2))

    # Boundary-condition blocks w.r.t. ya and yb (share row indices).
    rows_bc = np.repeat(np.arange(n_col, m * n + k), n)
    cols_bc = np.tile(np.arange(n), n_bc)

    # Dependency of collocation residuals on the parameters p.
    rows_p_col = np.repeat(np.arange(n_col), k)
    cols_p_col = np.tile(np.arange(m * n, m * n + k), n_col)

    # Dependency of boundary conditions on the parameters p.
    rows_p_bc = np.repeat(np.arange(n_col, m * n + k), k)
    cols_p_bc = np.tile(np.arange(m * n, m * n + k), n_bc)

    i = np.hstack((rows_col, rows_col, rows_bc, rows_bc,
                   rows_p_col, rows_p_bc))
    j = np.hstack((cols_col, cols_col + n,
                   cols_bc, cols_bc + (m - 1) * n,
                   cols_p_col, cols_p_bc))
    return i, j
def stacked_matmul(a, b):
    """Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).

    Empirical optimization: a single einsum call wins for small matrices,
    while an outer Python loop over BLAS `np.dot` wins for large ones.
    """
    if a.shape[1] <= 50:
        return np.einsum('...ij,...jk->...ik', a, b)
    result = np.empty((a.shape[0], a.shape[1], b.shape[2]))
    for idx in range(a.shape[0]):
        result[idx] = np.dot(a[idx], b[idx])
    return result
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
                         df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
    """Construct the Jacobian of the collocation system.

    There are n * m + k functions: m - 1 collocations residuals, each
    containing n components, followed by n + k boundary condition residuals.

    There are n * m + k variables: m vectors of y, each containing n
    components, followed by k values of vector p.

    For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
    the following sparsity structure:

        1 1 2 2 0 0 0 0  5
        1 1 2 2 0 0 0 0  5
        0 0 1 1 2 2 0 0  5
        0 0 1 1 2 2 0 0  5
        0 0 0 0 1 1 2 2  5
        0 0 0 0 1 1 2 2  5

        3 3 0 0 0 0 4 4  6
        3 3 0 0 0 0 4 4  6
        3 3 0 0 0 0 4 4  6

    Zeros denote identically zero values, other values denote different kinds
    of blocks in the matrix (see below). The blank row indicates the separation
    of collocation residuals from boundary conditions. And the blank column
    indicates the separation of y values from p values.

    Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
    of collocation residuals with respect to y.

    Parameters
    ----------
    n : int
        Number of equations in the ODE system.
    m : int
        Number of nodes in the mesh.
    k : int
        Number of the unknown parameters.
    i_jac, j_jac : ndarray
        Row and column indices returned by `compute_jac_indices`. They
        represent different blocks in the Jacobian matrix in the following
        order (see the scheme above):

            * 1: m - 1 diagonal n x n blocks for the collocation residuals.
            * 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
            * 3 : (n + k) x n block for the dependency of the boundary
              conditions on ya.
            * 4: (n + k) x n block for the dependency of the boundary
              conditions on yb.
            * 5: (m - 1) * n x k block for the dependency of the collocation
              residuals on p.
            * 6: (n + k) x k block for the dependency of the boundary
              conditions on p.
    df_dy : ndarray, shape (n, n, m)
        Jacobian of f with respect to y computed at the mesh nodes.
    df_dy_middle : ndarray, shape (n, n, m - 1)
        Jacobian of f with respect to y computed at the middle between the
        mesh nodes.
    df_dp : ndarray with shape (n, k, m) or None
        Jacobian of f with respect to p computed at the mesh nodes.
    df_dp_middle : ndarray with shape (n, k, m - 1) or None
        Jacobian of f with respect to p computed at the middle between the
        mesh nodes.
    dbc_dya, dbc_dyb : ndarray, shape (n, n)
        Jacobian of bc with respect to ya and yb.
    dbc_dp : ndarray with shape (n, k) or None
        Jacobian of bc with respect to p.

    Returns
    -------
    J : csc_matrix, shape (n * m + k, n * m + k)
        Jacobian of the collocation system in a sparse form.

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
       Number 3, pp. 299-316, 2001.
    """
    # Reorder so the stacked axis (mesh interval) comes first for
    # stacked_matmul / broadcasting.
    df_dy = np.transpose(df_dy, (2, 0, 1))
    df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
    h = h[:, np.newaxis, np.newaxis]
    dtype = df_dy.dtype
    # Computing diagonal n x n blocks.
    dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
    dPhi_dy_0[:] = -np.identity(n)
    dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
    T = stacked_matmul(df_dy_middle, df_dy[:-1])
    dPhi_dy_0 -= h**2 / 12 * T
    # Computing off-diagonal n x n blocks.
    dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
    dPhi_dy_1[:] = np.identity(n)
    dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
    T = stacked_matmul(df_dy_middle, df_dy[1:])
    dPhi_dy_1 += h**2 / 12 * T
    values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
                        dbc_dyb.ravel()))
    if k > 0:
        # Parameter-dependency blocks (kinds 5 and 6 in the scheme above).
        df_dp = np.transpose(df_dp, (2, 0, 1))
        df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
        T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
        df_dp_middle += 0.125 * h * T
        dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
        values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
    # Assemble in COO form from the precomputed index pattern, then convert
    # to CSC for the sparse LU factorization in solve_newton.
    J = coo_matrix((values, (i_jac, j_jac)))
    return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
    """Evaluate collocation residuals.

    The solution is sought as a C1-continuous cubic spline whose derivative
    matches the ODE rhs at the mesh nodes; residuals are the mismatch
    between the spline derivative and the rhs at the interval midpoints.
    This scheme belongs to the Lobatto IIIA family; see [1]_ for the
    formulas and discussion.

    Returns
    -------
    col_res : ndarray, shape (n, m - 1)
        Collocation residuals at the middle points of the mesh intervals.
    y_middle : ndarray, shape (n, m - 1)
        Values of the cubic spline evaluated at the middle points of the
        mesh intervals.
    f : ndarray, shape (n, m)
        RHS of the ODE system evaluated at the mesh nodes.
    f_middle : ndarray, shape (n, m - 1)
        RHS of the ODE system evaluated at the middle points of the mesh
        intervals (and using `y_middle`).

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
       Number 3, pp. 299-316, 2001.
    """
    f = fun(x, y, p)
    y_left, y_right = y[:, :-1], y[:, 1:]
    f_left, f_right = f[:, :-1], f[:, 1:]

    # Hermite-interpolated spline value at the interval midpoints.
    y_middle = 0.5 * (y_right + y_left) - 0.125 * h * (f_right - f_left)
    f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)

    # Simpson-style collocation condition over each interval.
    col_res = y_right - y_left - h / 6 * (f_left + f_right + 4 * f_middle)
    return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
    """Create the function and the Jacobian for the collocation system.

    Returns a pair of closures over the fixed mesh: one evaluating the
    collocation residuals and one assembling the sparse system Jacobian.
    When analytical `fun_jac`/`bc_jac` are not supplied, derivatives are
    estimated by forward differences.
    """
    x_middle = x[:-1] + 0.5 * h
    # The Jacobian sparsity pattern is fixed for a given (n, m, k).
    i_jac, j_jac = compute_jac_indices(n, m, k)

    def col_fun(y, p):
        # Collocation residuals (and midpoint values) for the current iterate.
        return collocation_fun(fun, y, p, x, h)

    def sys_jac(y, p, y_middle, f, f_middle, bc0):
        if fun_jac is None:
            df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
            df_dy_middle, df_dp_middle = estimate_fun_jac(
                fun, x_middle, y_middle, p, f_middle)
        else:
            df_dy, df_dp = fun_jac(x, y, p)
            df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
        if bc_jac is None:
            dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
                                                       p, bc0)
        else:
            dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
        return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
                                    df_dy_middle, df_dp, df_dp_middle, dbc_dya,
                                    dbc_dyb, dbc_dp)
    return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
    """Solve the nonlinear collocation system by a Newton method.

    This is a simple Newton method with a backtracking line search. As
    advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
    is used, where J is the Jacobian matrix at the current iteration and r is
    the vector or collocation residuals (values of the system lhs).

    The method alters between full Newton iterations and the fixed-Jacobian
    iterations based

    There are other tricks proposed in [1]_, but they are not used as they
    don't seem to improve anything significantly, and even break the
    convergence on some test problems I tried.

    All important parameters of the algorithm are defined inside the function.

    Parameters
    ----------
    n : int
        Number of equations in the ODE system.
    m : int
        Number of nodes in the mesh.
    h : ndarray, shape (m-1,)
        Mesh intervals.
    col_fun : callable
        Function computing collocation residuals.
    bc : callable
        Function computing boundary condition residuals.
    jac : callable
        Function computing the Jacobian of the whole system (including
        collocation and boundary condition residuals). It is supposed to
        return csc_matrix.
    y : ndarray, shape (n, m)
        Initial guess for the function values at the mesh nodes.
    p : ndarray, shape (k,)
        Initial guess for the unknown parameters.
    B : ndarray with shape (n, n) or None
        Matrix to force the S y(a) = 0 condition for a problems with the
        singular term. If None, the singular term is assumed to be absent.
    bvp_tol : float
        Tolerance to which we want to solve a BVP.
    bc_tol : float
        Tolerance to which we want to satisfy the boundary conditions.

    Returns
    -------
    y : ndarray, shape (n, m)
        Final iterate for the function values at the mesh nodes.
    p : ndarray, shape (k,)
        Final iterate for the unknown parameters.
    singular : bool
        True, if the LU decomposition failed because Jacobian turned out
        to be singular.

    References
    ----------
    .. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
       Boundary Value Problems for Ordinary Differential Equations"
    """
    # We know that the solution residuals at the middle points of the mesh
    # are connected with collocation residuals r_middle = 1.5 * col_res / h.
    # As our BVP solver tries to decrease relative residuals below a certain
    # tolerance, it seems reasonable to terminated Newton iterations by
    # comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
    # which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
    # the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
    # should be computed as follows:
    tol_r = 2/3 * h * 5e-2 * bvp_tol

    # Maximum allowed number of Jacobian evaluation and factorization, in
    # other words, the maximum number of full Newton iterations. A small value
    # is recommended in the literature.
    max_njev = 4

    # Maximum number of iterations, considering that some of them can be
    # performed with the fixed Jacobian. In theory, such iterations are cheap,
    # but it's not that simple in Python.
    max_iter = 8

    # Minimum relative improvement of the criterion function to accept the
    # step (Armijo constant).
    sigma = 0.2

    # Step size decrease factor for backtracking.
    tau = 0.5

    # Maximum number of backtracking steps, the minimum step is then
    # tau ** n_trial.
    n_trial = 4

    col_res, y_middle, f, f_middle = col_fun(y, p)
    bc_res = bc(y[:, 0], y[:, -1], p)
    # Full residual vector: collocation residuals (Fortran order, matching
    # the Jacobian layout) followed by boundary-condition residuals.
    res = np.hstack((col_res.ravel(order='F'), bc_res))

    njev = 0
    singular = False
    recompute_jac = True
    for iteration in range(max_iter):
        if recompute_jac:
            J = jac(y, p, y_middle, f, f_middle, bc_res)
            njev += 1
            try:
                # Sparse LU factorization of the full system Jacobian.
                LU = splu(J)
            except RuntimeError:
                singular = True
                break

            step = LU.solve(res)
            cost = np.dot(step, step)

        y_step = step[:m * n].reshape((n, m), order='F')
        p_step = step[m * n:]

        alpha = 1
        for trial in range(n_trial + 1):
            y_new = y - alpha * y_step
            if B is not None:
                # Enforce the singular-term condition on the left boundary.
                y_new[:, 0] = np.dot(B, y_new[:, 0])
            p_new = p - alpha * p_step

            col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
            bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
            res = np.hstack((col_res.ravel(order='F'), bc_res))

            step_new = LU.solve(res)
            cost_new = np.dot(step_new, step_new)
            if cost_new < (1 - 2 * alpha * sigma) * cost:
                break

            if trial < n_trial:
                alpha *= tau

        y = y_new
        p = p_new

        if njev == max_njev:
            break

        if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
                np.all(np.abs(bc_res) < bc_tol)):
            break

        # If the full step was taken, then we are going to continue with
        # the same Jacobian. This is the approach of BVP_SOLVER.
        if alpha == 1:
            step = step_new
            cost = cost_new
            recompute_jac = False
        else:
            recompute_jac = True

    return y, p, singular
def print_iteration_header():
    """Print the column header for verbose iteration output."""
    columns = ("Iteration", "Max residual", "Max BC residual", "Total nodes",
               "Nodes added")
    print("".join(f"{col:^15}" for col in columns))
def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
                             nodes_added):
    """Print one row of verbose iteration output (matches the header)."""
    print(f"{iteration:^15}{residual:^15.2e}{bc_residual:^15.2e}"
          f"{total_nodes:^15}{nodes_added:^15}")
class BVPResult(OptimizeResult):
    """Represents the BVP solver result as an `OptimizeResult` subclass."""
    pass
# Human-readable descriptions keyed by the solver's integer status code.
TERMINATION_MESSAGES = {
    0: "The algorithm converged to the desired accuracy.",
    1: "The maximum number of mesh nodes is exceeded.",
    2: "A singular Jacobian encountered when solving the collocation system.",
    3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
    """Estimate rms values of collocation residuals using Lobatto quadrature.

    The residuals are the difference between the derivative of the spline
    solution and the rhs of the ODE, relative (normalized by 1 + |f|). The
    squared relative residuals are integrated over each interval with a
    5-point Lobatto rule [1]_, exploiting that the residuals vanish at the
    mesh nodes; integrals are normalized by interval lengths so the return
    values read as RMS estimates (unlike [2]_, which skips normalization).

    Note: `r_middle` is normalized in place, as in the original algorithm.

    Returns
    -------
    rms_res : ndarray, shape (m - 1,)
        Estimated rms values of the relative residuals over each interval.

    References
    ----------
    .. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
    .. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
       Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
       Number 3, pp. 299-316, 2001.
    """
    x_middle = x[:-1] + 0.5 * h
    offset = 0.5 * h * (3 / 7) ** 0.5  # inner Lobatto abscissas

    def _sq_rel_residual(xq):
        # Component-summed squared relative residual |y' - f|^2 / (1+|f|)^2
        # at the quadrature points xq.
        yq = sol(xq)
        fq = fun(xq, yq, p)
        rq = (sol(xq, 1) - fq) / (1 + np.abs(fq))
        return np.sum(np.real(rq * np.conj(rq)), axis=0)

    r_plus = _sq_rel_residual(x_middle + offset)
    r_minus = _sq_rel_residual(x_middle - offset)

    r_middle /= 1 + np.abs(f_middle)
    r_mid_sq = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)

    # Lobatto weights: 32/45 for the midpoint, 49/90 for the inner points.
    return (0.5 * (32 / 45 * r_mid_sq + 49 / 90 * (r_plus + r_minus))) ** 0.5
def create_spline(y, yp, x, h):
    """Build a C1 cubic Hermite spline from node values and derivatives.

    The coefficient formulas match those used by interpolate.CubicSpline.

    Parameters
    ----------
    y : ndarray, shape (n, m)
        Function values at the mesh nodes.
    yp : ndarray, shape (n, m)
        Function derivatives at the mesh nodes.
    x : ndarray, shape (m,)
        Mesh nodes.
    h : ndarray, shape (m - 1,)
        Mesh interval lengths, ``np.diff(x)``.

    Returns
    -------
    sol : PPoly
        Constructed spline as a PPoly instance.
    """
    from scipy.interpolate import PPoly
    n_comp, n_nodes = y.shape
    left_vals = y[:, :-1]
    left_der = yp[:, :-1]
    secant = (y[:, 1:] - left_vals) / h
    # Curvature-like term combining both endpoint derivatives with the secant.
    curv = (left_der + yp[:, 1:] - 2 * secant) / h
    coeffs = np.empty((4, n_comp, n_nodes - 1), dtype=y.dtype)
    coeffs[0] = curv / h
    coeffs[1] = (secant - left_der) / h - curv
    coeffs[2] = left_der
    coeffs[3] = left_vals
    # PPoly wants the per-component axis first; polynomial axis is 1.
    return PPoly(np.moveaxis(coeffs, 1, 0), x, extrapolate=True, axis=1)
def modify_mesh(x, insert_1, insert_2):
    """Refine a mesh by inserting nodes into selected intervals.

    Only insertion is performed; node removal logic is not established and
    its impact on the solver is presumably negligible.

    Parameters
    ----------
    x : ndarray, shape (m,)
        Mesh nodes.
    insert_1 : ndarray
        Indices of intervals that each receive 1 new node at the midpoint.
    insert_2 : ndarray
        Indices of intervals that each receive 2 new nodes, splitting the
        interval into 3 equal parts.

    Returns
    -------
    x_new : ndarray
        New mesh nodes.

    Notes
    -----
    `insert_1` and `insert_2` should not have common values.
    """
    midpoints = 0.5 * (x[insert_1] + x[insert_1 + 1])
    first_thirds = (2 * x[insert_2] + x[insert_2 + 1]) / 3
    second_thirds = (x[insert_2] + 2 * x[insert_2 + 1]) / 3
    # np.insert behaves differently across NumPy versions, so concatenate
    # everything and sort instead -- simple and reliable.
    return np.sort(np.hstack((x, midpoints, first_thirds, second_thirds)))
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
    """Wrap functions for unified usage in the solver.

    All wrappers take the full ``(x, y, p)`` / ``(ya, yb, p)`` signature even
    when the problem has no unknown parameters (k == 0), and coerce results
    to `dtype`. When the singular term ``S y / (x - a)`` is present, it is
    folded into the wrapped RHS and its Jacobian. Returns the tuple
    ``(fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped)``; the
    Jacobian entries are None when the user supplied no Jacobians.
    """
    # Default to None; the branches below overwrite these names only when a
    # user-supplied Jacobian exists.
    if fun_jac is None:
        fun_jac_wrapped = None
    if bc_jac is None:
        bc_jac_wrapped = None
    if k == 0:
        # No unknown parameters: accept and ignore a dummy third argument so
        # the solver can always pass p.
        def fun_p(x, y, _):
            return np.asarray(fun(x, y), dtype)
        def bc_wrapped(ya, yb, _):
            return np.asarray(bc(ya, yb), dtype)
        if fun_jac is not None:
            def fun_jac_p(x, y, _):
                # No df_dp in the parameterless case.
                return np.asarray(fun_jac(x, y), dtype), None
        if bc_jac is not None:
            def bc_jac_wrapped(ya, yb, _):
                dbc_dya, dbc_dyb = bc_jac(ya, yb)
                return (np.asarray(dbc_dya, dtype),
                        np.asarray(dbc_dyb, dtype), None)
    else:
        # Parameters present: forward p to the user callables.
        def fun_p(x, y, p):
            return np.asarray(fun(x, y, p), dtype)
        def bc_wrapped(x, y, p):
            return np.asarray(bc(x, y, p), dtype)
        if fun_jac is not None:
            def fun_jac_p(x, y, p):
                df_dy, df_dp = fun_jac(x, y, p)
                return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
        if bc_jac is not None:
            def bc_jac_wrapped(ya, yb, p):
                dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
                return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
                        np.asarray(dbc_dp, dtype))
    if S is None:
        fun_wrapped = fun_p
    else:
        # Add the singular term S y / (x - a); at x == a itself the term is
        # indeterminate, so the derivative is corrected with D instead.
        def fun_wrapped(x, y, p):
            f = fun_p(x, y, p)
            if x[0] == a:
                f[:, 0] = np.dot(D, f[:, 0])
                f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
            else:
                f += np.dot(S, y) / (x - a)
            return f
    if fun_jac is not None:
        if S is None:
            fun_jac_wrapped = fun_jac_p
        else:
            # Broadcastable copy of S for the per-node Jacobian stack.
            Sr = S[:, :, np.newaxis]
            def fun_jac_wrapped(x, y, p):
                df_dy, df_dp = fun_jac_p(x, y, p)
                if x[0] == a:
                    df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
                    df_dy[:, :, 1:] += Sr / (x[1:] - a)
                else:
                    df_dy += Sr / (x - a)
                return df_dy, df_dp
    return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
              tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
    """Solve a boundary value problem for a system of ODEs.

    This function numerically solves a first order system of ODEs subject to
    two-point boundary conditions::

        dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
        bc(y(a), y(b), p) = 0

    Here x is a 1-D independent variable, y(x) is an N-D
    vector-valued function and p is a k-D vector of unknown
    parameters which is to be found along with y(x). For the problem to be
    determined, there must be n + k boundary conditions, i.e., bc must be an
    (n + k)-D function.

    The last singular term on the right-hand side of the system is optional.
    It is defined by an n-by-n matrix S, such that the solution must satisfy
    S y(a) = 0. This condition will be forced during iterations, so it must not
    contradict boundary conditions. See [2]_ for the explanation how this term
    is handled when solving BVPs numerically.

    Problems in a complex domain can be solved as well. In this case, y and p
    are considered to be complex, and f and bc are assumed to be complex-valued
    functions, but x stays real. Note that f and bc must be complex
    differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
    should rewrite your problem for real and imaginary parts separately. To
    solve a problem in a complex domain, pass an initial guess for y with a
    complex data type (see below).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system. The calling signature is ``fun(x, y)``,
        or ``fun(x, y, p)`` if parameters are present. All arguments are
        ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
        ``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
        return value must be an array with shape (n, m) and with the same
        layout as ``y``.
    bc : callable
        Function evaluating residuals of the boundary conditions. The calling
        signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
        present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
        and ``p`` with shape (k,). The return value must be an array with
        shape (n + k,).
    x : array_like, shape (m,)
        Initial mesh. Must be a strictly increasing sequence of real numbers
        with ``x[0]=a`` and ``x[-1]=b``.
    y : array_like, shape (n, m)
        Initial guess for the function values at the mesh nodes, ith column
        corresponds to ``x[i]``. For problems in a complex domain pass `y`
        with a complex data type (even if the initial guess is purely real).
    p : array_like with shape (k,) or None, optional
        Initial guess for the unknown parameters. If None (default), it is
        assumed that the problem doesn't depend on any parameters.
    S : array_like with shape (n, n) or None
        Matrix defining the singular term. If None (default), the problem is
        solved without the singular term.
    fun_jac : callable or None, optional
        Function computing derivatives of f with respect to y and p. The
        calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
        parameters are present. The return must contain 1 or 2 elements in the
        following order:

            * df_dy : array_like with shape (n, n, m), where an element
              (i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
            * df_dp : array_like with shape (n, k, m), where an element
              (i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.

        Here q numbers nodes at which x and y are defined, whereas i and j
        number vector components. If the problem is solved without unknown
        parameters, df_dp should not be returned.

        If `fun_jac` is None (default), the derivatives will be estimated
        by the forward finite differences.
    bc_jac : callable or None, optional
        Function computing derivatives of bc with respect to ya, yb, and p.
        The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
        if parameters are present. The return must contain 2 or 3 elements in
        the following order:

            * dbc_dya : array_like with shape (n, n), where an element (i, j)
              equals to d bc_i(ya, yb, p) / d ya_j.
            * dbc_dyb : array_like with shape (n, n), where an element (i, j)
              equals to d bc_i(ya, yb, p) / d yb_j.
            * dbc_dp : array_like with shape (n, k), where an element (i, j)
              equals to d bc_i(ya, yb, p) / d p_j.

        If the problem is solved without unknown parameters, dbc_dp should not
        be returned.

        If `bc_jac` is None (default), the derivatives will be estimated by
        the forward finite differences.
    tol : float, optional
        Desired tolerance of the solution. If we define ``r = y' - f(x, y)``,
        where y is the found solution, then the solver tries to achieve on each
        mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is
        estimated in a root mean squared sense (using a numerical quadrature
        formula). Default is 1e-3.
    max_nodes : int, optional
        Maximum allowed number of the mesh nodes. If exceeded, the algorithm
        terminates. Default is 1000.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:

            * 0 (default) : work silently.
            * 1 : display a termination report.
            * 2 : display progress during iterations.
    bc_tol : float, optional
        Desired absolute tolerance for the boundary condition residuals: `bc`
        value should satisfy ``abs(bc) < bc_tol`` component-wise.
        Equals to `tol` by default. Up to 10 iterations are allowed to achieve this
        tolerance.

    Returns
    -------
    Bunch object with the following fields defined:
    sol : PPoly
        Found solution for y as `scipy.interpolate.PPoly` instance, a C1
        continuous cubic spline.
    p : ndarray or None, shape (k,)
        Found parameters. None, if the parameters were not present in the
        problem.
    x : ndarray, shape (m,)
        Nodes of the final mesh.
    y : ndarray, shape (n, m)
        Solution values at the mesh nodes.
    yp : ndarray, shape (n, m)
        Solution derivatives at the mesh nodes.
    rms_residuals : ndarray, shape (m - 1,)
        RMS values of the relative residuals over each mesh interval (see the
        description of `tol` parameter).
    niter : int
        Number of completed iterations.
    status : int
        Reason for algorithm termination:

            * 0: The algorithm converged to the desired accuracy.
            * 1: The maximum number of mesh nodes is exceeded.
            * 2: A singular Jacobian encountered when solving the collocation
              system.

    message : string
        Verbal description of the termination reason.
    success : bool
        True if the algorithm converged to the desired accuracy (``status=0``).

    Notes
    -----
    This function implements a 4th order collocation algorithm with the
    control of residuals similar to [1]_. A collocation system is solved
    by a damped Newton method with an affine-invariant criterion function as
    described in [3]_.

    Note that in [1]_ integral residuals are defined without normalization
    by interval lengths. So, their definition is different by a multiplier of
    h**0.5 (h is an interval length) from the definition used here.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
           Number 3, pp. 299-316, 2001.
    .. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
           Solver".
    .. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
           Boundary Value Problems for Ordinary Differential Equations".
    .. [4] `Cauchy-Riemann equations
            <https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
            Wikipedia.

    Examples
    --------
    In the first example, we solve Bratu's problem::

        y'' + k * exp(y) = 0
        y(0) = y(1) = 0

    for k = 1.

    We rewrite the equation as a first-order system and implement its
    right-hand side evaluation::

        y1' = y2
        y2' = -exp(y1)

    >>> import numpy as np
    >>> def fun(x, y):
    ...     return np.vstack((y[1], -np.exp(y[0])))

    Implement evaluation of the boundary condition residuals:

    >>> def bc(ya, yb):
    ...     return np.array([ya[0], yb[0]])

    Define the initial mesh with 5 nodes:

    >>> x = np.linspace(0, 1, 5)

    This problem is known to have two solutions. To obtain both of them, we
    use two different initial guesses for y. We denote them by subscripts
    a and b.

    >>> y_a = np.zeros((2, x.size))
    >>> y_b = np.zeros((2, x.size))
    >>> y_b[0] = 3

    Now we are ready to run the solver.

    >>> from scipy.integrate import solve_bvp
    >>> res_a = solve_bvp(fun, bc, x, y_a)
    >>> res_b = solve_bvp(fun, bc, x, y_b)

    Let's plot the two found solutions. We take an advantage of having the
    solution in a spline form to produce a smooth plot.

    >>> x_plot = np.linspace(0, 1, 100)
    >>> y_plot_a = res_a.sol(x_plot)[0]
    >>> y_plot_b = res_b.sol(x_plot)[0]
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x_plot, y_plot_a, label='y_a')
    >>> plt.plot(x_plot, y_plot_b, label='y_b')
    >>> plt.legend()
    >>> plt.xlabel("x")
    >>> plt.ylabel("y")
    >>> plt.show()

    We see that the two solutions have similar shape, but differ in scale
    significantly.

    In the second example, we solve a simple Sturm-Liouville problem::

        y'' + k**2 * y = 0
        y(0) = y(1) = 0

    It is known that a non-trivial solution y = A * sin(k * x) is possible for
    k = pi * n, where n is an integer. To establish the normalization constant
    A = 1 we add a boundary condition::

        y'(0) = k

    Again, we rewrite our equation as a first-order system and implement its
    right-hand side evaluation::

        y1' = y2
        y2' = -k**2 * y1

    >>> def fun(x, y, p):
    ...     k = p[0]
    ...     return np.vstack((y[1], -k**2 * y[0]))

    Note that parameters p are passed as a vector (with one element in our
    case).

    Implement the boundary conditions:

    >>> def bc(ya, yb, p):
    ...     k = p[0]
    ...     return np.array([ya[0], yb[0], ya[1] - k])

    Set up the initial mesh and guess for y. We aim to find the solution for
    k = 2 * pi, to achieve that we set values of y to approximately follow
    sin(2 * pi * x):

    >>> x = np.linspace(0, 1, 5)
    >>> y = np.zeros((2, x.size))
    >>> y[0, 1] = 1
    >>> y[0, 3] = -1

    Run the solver with 6 as an initial guess for k.

    >>> sol = solve_bvp(fun, bc, x, y, p=[6])

    We see that the found k is approximately correct:

    >>> sol.p[0]
    6.28329460046

    And, finally, plot the solution to see the anticipated sinusoid:

    >>> x_plot = np.linspace(0, 1, 100)
    >>> y_plot = sol.sol(x_plot)[0]
    >>> plt.plot(x_plot, y_plot)
    >>> plt.xlabel("x")
    >>> plt.ylabel("y")
    >>> plt.show()
    """
    # --- Input validation and normalization ---
    x = np.asarray(x, dtype=float)
    if x.ndim != 1:
        raise ValueError("`x` must be 1 dimensional.")
    h = np.diff(x)
    if np.any(h <= 0):
        raise ValueError("`x` must be strictly increasing.")
    # Left endpoint; the optional singular term S*y/(x - a) is centered here.
    a = x[0]
    y = np.asarray(y)
    # A complex initial guess switches the whole computation to complex.
    if np.issubdtype(y.dtype, np.complexfloating):
        dtype = complex
    else:
        dtype = float
    y = y.astype(dtype, copy=False)
    if y.ndim != 2:
        raise ValueError("`y` must be 2 dimensional.")
    if y.shape[1] != x.shape[0]:
        raise ValueError("`y` is expected to have {} columns, but actually "
                         "has {}.".format(x.shape[0], y.shape[1]))
    # An empty p means "no unknown parameters" (k == 0 below).
    if p is None:
        p = np.array([])
    else:
        p = np.asarray(p, dtype=dtype)
    if p.ndim != 1:
        raise ValueError("`p` must be 1 dimensional.")
    # Clamp tol to a level achievable in floating point.
    if tol < 100 * EPS:
        warn(f"`tol` is too low, setting to {100 * EPS:.2e}")
        tol = 100 * EPS
    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")
    n = y.shape[0]
    k = p.shape[0]
    if S is not None:
        S = np.asarray(S, dtype=dtype)
        if S.shape != (n, n):
            raise ValueError("`S` is expected to have shape {}, "
                             "but actually has {}".format((n, n), S.shape))
        # Compute I - S^+ S to impose necessary boundary conditions.
        B = np.identity(n) - np.dot(pinv(S), S)
        # Project the initial guess at x=a so it is consistent with S y(a) = 0.
        y[:, 0] = np.dot(B, y[:, 0])
        # Compute (I - S)^+ to correct derivatives at x=a.
        D = pinv(np.identity(n) - S)
    else:
        B = None
        D = None
    if bc_tol is None:
        bc_tol = tol
    # Maximum number of iterations
    max_iteration = 10
    fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
        fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
    # Sanity-check the shapes produced by the user callables before iterating.
    f = fun_wrapped(x, y, p)
    if f.shape != y.shape:
        raise ValueError("`fun` return is expected to have shape {}, "
                         "but actually has {}.".format(y.shape, f.shape))
    bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
    if bc_res.shape != (n + k,):
        raise ValueError("`bc` return is expected to have shape {}, "
                         "but actually has {}.".format((n + k,), bc_res.shape))
    status = 0
    iteration = 0
    if verbose == 2:
        print_iteration_header()
    # --- Main loop: solve collocation system, estimate residuals, refine mesh ---
    while True:
        m = x.shape[0]
        col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
                                       fun_jac_wrapped, bc_jac_wrapped, x, h)
        y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
                                      y, p, B, tol, bc_tol)
        iteration += 1
        col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
                                                         p, x, h)
        bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
        max_bc_res = np.max(abs(bc_res))
        # This relation is not trivial, but can be verified.
        r_middle = 1.5 * col_res / h
        sol = create_spline(y, f, x, h)
        rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
                                         r_middle, f_middle)
        max_rms_res = np.max(rms_res)
        if singular:
            status = 2
            break
        # Refinement rule: 1 extra node for moderately bad intervals,
        # 2 extra nodes (trisection) where the residual exceeds 100*tol.
        insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
        insert_2, = np.nonzero(rms_res >= 100 * tol)
        nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
        if m + nodes_added > max_nodes:
            status = 1
            if verbose == 2:
                nodes_added = f"({nodes_added})"
                print_iteration_progress(iteration, max_rms_res, max_bc_res,
                                         m, nodes_added)
            break
        if verbose == 2:
            print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
                                     nodes_added)
        if nodes_added > 0:
            # Refine the mesh and resample the current spline on it.
            x = modify_mesh(x, insert_1, insert_2)
            h = np.diff(x)
            y = sol(x)
        elif max_bc_res <= bc_tol:
            status = 0
            break
        elif iteration >= max_iteration:
            status = 3
            break
    # --- Termination report ---
    if verbose > 0:
        if status == 0:
            print("Solved in {} iterations, number of nodes {}. \n"
                  "Maximum relative residual: {:.2e} \n"
                  "Maximum boundary residual: {:.2e}"
                  .format(iteration, x.shape[0], max_rms_res, max_bc_res))
        elif status == 1:
            print("Number of nodes is exceeded after iteration {}. \n"
                  "Maximum relative residual: {:.2e} \n"
                  "Maximum boundary residual: {:.2e}"
                  .format(iteration, max_rms_res, max_bc_res))
        elif status == 2:
            print("Singular Jacobian encountered when solving the collocation "
                  "system on iteration {}. \n"
                  "Maximum relative residual: {:.2e} \n"
                  "Maximum boundary residual: {:.2e}"
                  .format(iteration, max_rms_res, max_bc_res))
        elif status == 3:
            print("The solver was unable to satisfy boundary conditions "
                  "tolerance on iteration {}. \n"
                  "Maximum relative residual: {:.2e} \n"
                  "Maximum boundary residual: {:.2e}"
                  .format(iteration, max_rms_res, max_bc_res))
    # Report p as None when the problem had no unknown parameters.
    if p.size == 0:
        p = None
    return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
                     niter=iteration, status=status,
                     message=TERMINATION_MESSAGES[status], success=status == 0)
| 41,067
| 34.403448
| 88
|
py
|
scipy
|
scipy-main/scipy/integrate/vode.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from . import _vode # type: ignore
# Public names forwarded (with a deprecation warning) to the private
# ``_vode`` extension module by ``__getattr__`` below.
__all__ = [ # noqa: F822
    'dvode',
    'zvode'
]
def __dir__():
    # Limit dir() of this deprecated shim module to the public names only.
    return __all__
def __getattr__(name):
    """Forward public names to ``_vode``, warning that this module is deprecated."""
    if name in __all__:
        warnings.warn("The `scipy.integrate.vode` namespace is deprecated "
                      "and will be removed in SciPy v2.0.0.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_vode, name)
    raise AttributeError(
        "scipy.integrate.vode is deprecated and has no attribute "
        f"{name}.")
| 625
| 20.586207
| 76
|
py
|
scipy
|
scipy-main/scipy/integrate/_quadpack_py.py
|
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy as np
__all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"]
# Re-export ``error`` from the compiled _quadpack extension at module level
# (presumably the exception type raised by the QUADPACK wrappers -- defined
# in the extension, not visible here).
error = _quadpack.error
class IntegrationWarning(UserWarning):
    """Warning emitted when an integration routine encounters issues."""
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50, complex_func=False):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
complex_func : bool, optional
Indicate if the function's (`func`) return type is real
(``complex_func=False``: default) or complex (``complex_func=True``).
In both cases, the function's argument is real.
If full_output is also non-zero, the `infodict`, `message`, and
`explain` for the real and complex components are returned in
a dictionary with keys "real output" and "imag output".
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
For valid results, the integral must converge; behavior for divergent
integrals is not guaranteed.
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslist']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslist']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
**Details of QUADPACK level routines**
`quad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. The routine called depends on
`weight`, `points` and the integration limits `a` and `b`.
================ ============== ========== =====================
QUADPACK routine `weight` `points` infinite bounds
================ ============== ========== =====================
qagse None No No
qagie None No Yes
qagpe None Yes No
qawoe 'sin', 'cos' No No
qawfe 'sin', 'cos' No either `a` or `b`
qawse 'alg*' No No
qawce 'cauchy' No No
================ ============== ========== =====================
The following provides a short desciption from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
qagpe
serves the same purposes as QAGS, but also allows the
user to provide explicit information about the location
and type of trouble-spots i.e. the abscissae of internal
singularities, discontinuities and other difficulties of
the integrand function.
qawoe
is an integrator for the evaluation of
:math:`\\int^b_a \\cos(\\omega x)f(x)dx` or
:math:`\\int^b_a \\sin(\\omega x)f(x)dx`
over a finite interval [a,b], where :math:`\\omega` and :math:`f`
are specified by the user. The rule evaluation component is based
on the modified Clenshaw-Curtis technique
An adaptive subdivision scheme is used in connection
with an extrapolation procedure, which is a modification
of that in ``QAGS`` and allows the algorithm to deal with
singularities in :math:`f(x)`.
qawfe
calculates the Fourier transform
:math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or
:math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx`
for user-provided :math:`\\omega` and :math:`f`. The procedure of
``QAWO`` is applied on successive finite intervals, and convergence
acceleration by means of the :math:`\\varepsilon`-algorithm is applied
to the series of integral approximations.
qawse
approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where
:math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with
:math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the
following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`,
:math:`\\log(x-a)\\log(b-x)`.
The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the
function :math:`v`. A globally adaptive subdivision strategy is
applied, with modified Clenshaw-Curtis integration on those
subintervals which contain `a` or `b`.
qawce
compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be
interpreted as a Cauchy principal value integral, for user specified
:math:`c` and :math:`f`. The strategy is globally adaptive. Modified
Clenshaw-Curtis integration is used on those intervals containing the
point :math:`x = c`.
**Integration of Complex Function of a Real Variable**
A complex valued function, :math:`f`, of a real variable can be written as
:math:`f = g + ih`. Similarly, the integral of :math:`f` can be
written as
.. math::
\\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx
assuming that the integrals of :math:`g` and :math:`h` exist
over the inteval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates
complex-valued functions by integrating the real and imaginary components
separately.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
.. [2] McCullough, Thomas; Phillips, Keith (1973).
Foundations of Analysis in the Complex Plane.
Holt Rinehart Winston.
ISBN 0-03-086370-8
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> import numpy as np
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3`
>>> f = lambda x, a: a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features as compared to the
size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integrals
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if complex_func:
def imfunc(x, *args):
return np.imag(func(x, *args))
def refunc(x, *args):
return np.real(func(x, *args))
re_retval = quad(refunc, a, b, args, full_output, epsabs,
epsrel, limit, points, weight, wvar, wopts,
maxp1, limlst, complex_func=False)
im_retval = quad(imfunc, a, b, args, full_output, epsabs,
epsrel, limit, points, weight, wvar, wopts,
maxp1, limlst, complex_func=False)
integral = re_retval[0] + 1j*im_retval[0]
error_estimate = re_retval[1] + 1j*im_retval[1]
retval = integral, error_estimate
if full_output:
msgexp = {}
msgexp["real"] = re_retval[2:]
msgexp["imag"] = im_retval[2:]
retval = retval + (msgexp,)
return retval
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == np.inf or a == -np.inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf):
msg = ("Sine or cosine weighted intergals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = ("Number of break points ({:d})"
" must be less than subinterval"
" limit ({:d})").format(len(points), limit)
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
    """Dispatch an unweighted integral to the appropriate QUADPACK routine.

    Finite limits go to ``qagse``, (semi-)infinite limits to ``qagie``, and
    finite limits with user break points to ``qagpe``.
    """
    upper_inf = (b == np.inf)
    lower_inf = (a == -np.inf)
    if upper_inf and lower_inf:
        infbounds = 2
        bound = 0  # ignored by qagie for the doubly-infinite case
    elif upper_inf:
        infbounds = 1
        bound = a
    elif lower_inf:
        infbounds = -1
        bound = b
    else:
        infbounds = 0  # both limits finite; no transformation needed

    if points is None:
        if infbounds == 0:
            return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
        return _quadpack._qagie(func,bound,infbounds,args,full_output,
                                epsabs,epsrel,limit)

    if infbounds != 0:
        raise ValueError("Infinity inputs cannot be used with break points.")

    # Keep only the break points strictly inside (a, b); np.unique also
    # collapses duplicates that would force evaluation at singular points.
    interior = np.unique(points)
    interior = interior[(a < interior) & (interior < b)]
    # qagpe expects two extra (unused) slots at the end of the array.
    padded = np.concatenate((interior, (0., 0.)))
    return _quadpack._qagpe(func,a,b,padded,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != np.inf and a != -np.inf): # finite limits
if wopts is None: # no precomputed Chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed Chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == np.inf and a != -np.inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != np.inf and a == -np.inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-np.inf, np.inf] or b in [-np.inf, np.inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`.
    gfun : callable or float
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result
        or a float indicating a constant boundary curve.
    hfun : callable or float
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
        If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
        and ``50 * (machine epsilon)``.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    Examples
    --------
    Compute the double integral of ``x * y**2`` over the box
    ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.

    >>> import numpy as np
    >>> from scipy import integrate
    >>> f = lambda y, x: x*y**2
    >>> integrate.dblquad(f, 0, 2, 0, 1)
    (0.6666666666666667, 7.401486830834377e-15)

    >>> f = lambda y, x: 1
    >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
    (0.41421356237309503, 1.1083280054755938e-14)
    """
    # nquad integrates the innermost variable first, so the (possibly
    # x-dependent) y-range precedes the fixed x-range [a, b].
    def y_bounds(*limits):
        x = limits[0]
        lower = gfun(x) if callable(gfun) else gfun
        upper = hfun(x) if callable(hfun) else hfun
        return [lower, upper]

    return nquad(func, [y_bounds, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`.
    gfun : function or float
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result
        or a float indicating a constant boundary curve.
    hfun : function or float
        The upper boundary curve in y (same requirements as `gfun`).
    qfun : function or float
        The lower boundary surface in z. It must be a function that takes
        two floats in the order (x, y) and returns a float, or a float
        indicating a constant boundary surface.
    rfun : function or float
        The upper boundary surface in z. (Same requirements as `qfun`.)
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : Adaptive quadrature using QUADPACK
    dblquad : Double integrals
    nquad : N-dimensional integrals

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    Examples
    --------
    Compute the triple integral of ``x * y * z``, over ``x`` ranging
    from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.

    >>> from scipy import integrate
    >>> f = lambda z, y, x: x*y*z
    >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
    (1.8749999999999998, 3.3246447942574074e-14)

    Note: `qfun`/`rfun` takes arguments in the order (x, y), even though
    ``f`` takes arguments in the order (z, y, x).

    >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
    (0.05416666666666668, 2.1774196738157757e-14)
    """
    # Integration order (innermost first): z, then y, then x.
    # nquad hands (y, x, *args) to the z-range and (x, *args) to the
    # y-range; qfun/rfun themselves take their arguments as (x, y).
    def z_bounds(*limits):
        y, x = limits[0], limits[1]
        lower = qfun(x, y) if callable(qfun) else qfun
        upper = rfun(x, y) if callable(rfun) else rfun
        return [lower, upper]

    def y_bounds(*limits):
        x = limits[0]
        lower = gfun(x) if callable(gfun) else gfun
        upper = hfun(x) if callable(hfun) else hfun
        return [lower, upper]

    return nquad(func, [z_bounds, y_bounds, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
def nquad(func, ranges, args=None, opts=None, full_output=False):
    r"""
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables.
    Various options allow improved integration of discontinuous functions, as
    well as the use of weighted integration, and generally finer control of
    the integration process.

    Parameters
    ----------
    func : {callable, scipy.LowLevelCallable}
        The function to be integrated, with signature
        ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.  Integration is carried
        out in order: ``x0`` is the innermost integral and ``xn`` the
        outermost.  ``t0, ..., tm`` are extra arguments passed in `args`.
    ranges : iterable object
        Each element of `ranges` is either a sequence of 2 numbers, or a
        callable returning such a sequence.  ``ranges[0]`` corresponds to
        integration over ``x0``, and so on.  A callable element is invoked
        with all the outer integration variables plus the parametric
        arguments, e.g. ``(a, b) = range0(x1, x2, t0, t1)`` for
        ``func = f(x0, x1, x2, t0, t1)``.
    args : iterable object, optional
        Additional arguments ``t0, ..., tm`` required by `func`, `ranges`,
        and `opts`.
    opts : iterable object or dict, optional
        Options to be passed to `quad`.  May be empty, a dict, or a sequence
        of dicts or callables that return a dict (same calling convention as
        `ranges`).  A single dict applies to every level of integration.
        Recognized keys (with defaults): ``epsabs`` (1.49e-08), ``epsrel``
        (1.49e-08), ``limit`` (50), ``points``, ``weight``, ``wvar``,
        ``wopts`` (all None).  See `quad` for details.
    full_output : bool, optional
        If True, also return a dict with the total number of integrand
        evaluations ``neval``.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.
    out_dict : dict, optional
        A dict containing additional information on the integration.

    See Also
    --------
    quad : 1-D numerical integration
    dblquad, tplquad : double and triple integrals

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...                                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}], full_output=True)
    (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})

    >>> def func2(x0, x1, x2, t0, t1):
    ...     return x0*x2**2 + np.sin(x1) + 1 + (1 if x0+t1*x1-t0>0 else 0)
    >>> def lim0(x1, x2, t0, t1):
    ...     return [t0*x1 + t1*x2 - 1, t0*x1 + t1*x2 + 1]
    >>> def lim1(x2, t0, t1):
    ...     return [x2 + t0**2*t1**3 - 1, x2 + t0**2*t1**3 + 1]
    >>> def lim2(t0, t1):
    ...     return [t0 + t1 - 1, t0 + t1 + 1]
    >>> def opts0(x1, x2, t0, t1):
    ...     return {'points' : [t0 - t1*x1]}
    >>> integrate.nquad(func2, [lim0, lim1, lim2], args=(0,1),
    ...                 opts=[opts0, {}, {}])
    (36.099919226771625, 1.8546948553373528e-07)
    """
    n_levels = len(ranges)
    # Normalize every range to a callable; constants are wrapped so they
    # share the calling convention of user-supplied range functions.
    range_fns = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
    if args is None:
        args = ()
    if opts is None:
        opts = [dict([])] * n_levels
    if isinstance(opts, dict):
        # One dict for all levels.
        opt_fns = [_OptFunc(opts)] * n_levels
    else:
        opt_fns = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
    return _NQuad(func, range_fns, opt_fns, full_output).integrate(*args)
class _RangeFunc:
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc:
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad:
    """Recursive driver behind `nquad`.

    Each call to `integrate` peels off one integration variable (outermost
    last) and hands the remaining inner integral to `quad` as the integrand.
    """

    def __init__(self, func, ranges, opts, full_output):
        self.abserr = 0  # running maximum of per-level error estimates
        self.func = func
        self.ranges = ranges
        self.opts = opts
        self.maxdepth = len(ranges)
        self.full_output = full_output
        if self.full_output:
            self.out_dict = {'neval': 0}

    def integrate(self, *args, **kwargs):
        """Integrate over the variable at recursion level ``depth``."""
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')

        # ranges/opts are indexed from the end: depth 0 is the outermost
        # variable, i.e. the last element of each list.
        ind = -(depth + 1)
        low, high = self.ranges[ind](*args)
        opt = dict(self.opts[ind](*args))

        # Break points outside the current interval would be rejected by
        # QUADPACK, so drop them here.
        if 'points' in opt:
            opt['points'] = [x for x in opt['points'] if low <= x <= high]

        innermost = (depth + 1 == self.maxdepth)
        if innermost:
            integrand = self.func
        else:
            integrand = partial(self.integrate, depth=depth + 1)

        quad_r = quad(integrand, low, high, args=args,
                      full_output=self.full_output, **opt)
        value = quad_r[0]
        abserr = quad_r[1]
        if self.full_output:
            # 'neval' should reflect total integrand evaluations, so only
            # the innermost level contributes to the count.
            if innermost:
                self.out_dict['neval'] += quad_r[2]['neval']
        self.abserr = max(self.abserr, abserr)

        if depth > 0:
            # Intermediate level: hand the value back to the enclosing quad.
            return value
        # Final result of the N-D integration, with accumulated error.
        if self.full_output:
            return value, self.abserr, self.out_dict
        return value, self.abserr
| 52,822
| 41.190895
| 468
|
py
|
scipy
|
scipy-main/scipy/integrate/_odepack_py.py
|
# Author: Travis Oliphant
__all__ = ['odeint']
import numpy as np
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
    """Warning raised during the execution of `odeint`."""
# Status-code -> human-readable message mapping used when reporting the
# outcome of an odeint run; 2 and 1 are success/no-op, negative codes are
# failure modes.  NOTE(review): the codes appear to mirror LSODA's ISTATE
# output values -- confirm against the _odepack wrapper.
_msgs = {2: "Integration successful.",
         1: "Nothing was done; the integration time was 0.",
         -1: "Excess work done on this call (perhaps wrong Dfun type).",
         -2: "Excess accuracy requested (tolerances too small).",
         -3: "Illegal input detected (internal error).",
         -4: "Repeated error test failures (internal error).",
         -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
         -6: "Error weight became zero during problem.",
         -7: "Internal workspace insufficient to finish (internal error).",
         -8: "Run terminated (internal error)."
         }
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
           ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
           hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
           mxords=5, printmessg=0, tfirst=False):
    """
    Integrate a system of ordinary differential equations.
    .. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
              differential equation.
    Solve a system of ordinary differential equations using lsoda from the
    FORTRAN library odepack.
    Solves the initial value problem for stiff or non-stiff systems
    of first order ode-s::
        dy/dt = func(y, t, ...)  [or func(t, y, ...)]
    where y can be a vector.
    .. note:: By default, the required order of the first two arguments of
              `func` are in the opposite order of the arguments in the system
              definition function used by the `scipy.integrate.ode` class and
              the function `scipy.integrate.solve_ivp`. To use a function with
              the signature ``func(t, y, ...)``, the argument `tfirst` must be
              set to ``True``.
    Parameters
    ----------
    func : callable(y, t, ...) or callable(t, y, ...)
        Computes the derivative of y at t.
        If the signature is ``callable(t, y, ...)``, then the argument
        `tfirst` must be set ``True``.
    y0 : array
        Initial condition on y (can be a vector).
    t : array
        A sequence of time points for which to solve for y. The initial
        value point should be the first element of this sequence.
        This sequence must be monotonically increasing or monotonically
        decreasing; repeated values are allowed.
    args : tuple, optional
        Extra arguments to pass to function.
    Dfun : callable(y, t, ...) or callable(t, y, ...)
        Gradient (Jacobian) of `func`.
        If the signature is ``callable(t, y, ...)``, then the argument
        `tfirst` must be set ``True``.
    col_deriv : bool, optional
        True if `Dfun` defines derivatives down columns (faster),
        otherwise `Dfun` should define derivatives across rows.
    full_output : bool, optional
        True if to return a dictionary of optional outputs as the second output
    printmessg : bool, optional
        Whether to print the convergence message
    tfirst : bool, optional
        If True, the first two arguments of `func` (and `Dfun`, if given)
        must ``t, y`` instead of the default ``y, t``.
        .. versionadded:: 1.1.0
    Returns
    -------
    y : array, shape (len(t), len(y0))
        Array containing the value of y for each desired time in t,
        with the initial value `y0` in the first row.
    infodict : dict, only returned if full_output == True
        Dictionary containing additional output information
        =======  ============================================================
        key      meaning
        =======  ============================================================
        'hu'     vector of step sizes successfully used for each time step
        'tcur'   vector with the value of t reached for each time step
                 (will always be at least as large as the input times)
        'tolsf'  vector of tolerance scale factors, greater than 1.0,
                 computed when a request for too much accuracy was detected
        'tsw'    value of t at the time of the last method switch
                 (given for each time step)
        'nst'    cumulative number of time steps
        'nfe'    cumulative number of function evaluations for each time step
        'nje'    cumulative number of jacobian evaluations for each time step
        'nqu'    a vector of method orders for each successful step
        'imxer'  index of the component of largest magnitude in the
                 weighted local error vector (e / ewt) on an error return, -1
                 otherwise
        'lenrw'  the length of the double work array required
        'leniw'  the length of integer work array required
        'mused'  a vector of method indicators for each successful time step:
                 1: adams (nonstiff), 2: bdf (stiff)
        =======  ============================================================
    Other Parameters
    ----------------
    ml, mu : int, optional
        If either of these are not None or non-negative, then the
        Jacobian is assumed to be banded. These give the number of
        lower and upper non-zero diagonals in this banded matrix.
        For the banded case, `Dfun` should return a matrix whose
        rows contain the non-zero bands (starting with the lowest diagonal).
        Thus, the return matrix `jac` from `Dfun` should have shape
        ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
        The data in `jac` must be stored such that ``jac[i - j + mu, j]``
        holds the derivative of the `i`th equation with respect to the `j`th
        state variable.  If `col_deriv` is True, the transpose of this
        `jac` must be returned.
    rtol, atol : float, optional
        The input parameters `rtol` and `atol` determine the error
        control performed by the solver.  The solver will control the
        vector, e, of estimated local errors in y, according to an
        inequality of the form ``max-norm of (e / ewt) <= 1``,
        where ewt is a vector of positive error weights computed as
        ``ewt = rtol * abs(y) + atol``.
        rtol and atol can be either vectors the same length as y or scalars.
        Defaults to 1.49012e-8.
    tcrit : ndarray, optional
        Vector of critical points (e.g., singularities) where integration
        care should be taken.
    h0 : float, (0: solver-determined), optional
        The step size to be attempted on the first step.
    hmax : float, (0: solver-determined), optional
        The maximum absolute step size allowed.
    hmin : float, (0: solver-determined), optional
        The minimum absolute step size allowed.
    ixpr : bool, optional
        Whether to generate extra printing at method switches.
    mxstep : int, (0: solver-determined), optional
        Maximum number of (internally defined) steps allowed for each
        integration point in t.
    mxhnil : int, (0: solver-determined), optional
        Maximum number of messages printed.
    mxordn : int, (0: solver-determined), optional
        Maximum order to be allowed for the non-stiff (Adams) method.
    mxords : int, (0: solver-determined), optional
        Maximum order to be allowed for the stiff (BDF) method.
    See Also
    --------
    solve_ivp : solve an initial value problem for a system of ODEs
    ode : a more object-oriented integrator based on VODE
    quad : for finding the area under a curve
    Examples
    --------
    The second order differential equation for the angle `theta` of a
    pendulum acted on by gravity with friction can be written::
        theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
    where `b` and `c` are positive constants, and a prime (') denotes a
    derivative. To solve this equation with `odeint`, we must first convert
    it to a system of first order equations. By defining the angular
    velocity ``omega(t) = theta'(t)``, we obtain the system::
        theta'(t) = omega(t)
        omega'(t) = -b*omega(t) - c*sin(theta(t))
    Let `y` be the vector [`theta`, `omega`]. We implement this system
    in Python as:
    >>> import numpy as np
    >>> def pend(y, t, b, c):
    ...     theta, omega = y
    ...     dydt = [omega, -b*omega - c*np.sin(theta)]
    ...     return dydt
    ...
    We assume the constants are `b` = 0.25 and `c` = 5.0:
    >>> b = 0.25
    >>> c = 5.0
    For initial conditions, we assume the pendulum is nearly vertical
    with `theta(0)` = `pi` - 0.1, and is initially at rest, so
    `omega(0)` = 0.  Then the vector of initial conditions is
    >>> y0 = [np.pi - 0.1, 0.0]
    We will generate a solution at 101 evenly spaced samples in the interval
    0 <= `t` <= 10.  So our array of times is:
    >>> t = np.linspace(0, 10, 101)
    Call `odeint` to generate the solution. To pass the parameters
    `b` and `c` to `pend`, we give them to `odeint` using the `args`
    argument.
    >>> from scipy.integrate import odeint
    >>> sol = odeint(pend, y0, t, args=(b, c))
    The solution is an array with shape (101, 2). The first column
    is `theta(t)`, and the second is `omega(t)`. The following code
    plots both components.
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
    >>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
    >>> plt.legend(loc='best')
    >>> plt.xlabel('t')
    >>> plt.grid()
    >>> plt.show()
    """
    if ml is None:
        ml = -1  # changed to zero inside function call
    if mu is None:
        mu = -1  # changed to zero inside function call
    dt = np.diff(t)
    if not ((dt >= 0).all() or (dt <= 0).all()):
        raise ValueError("The values in t must be monotonically increasing "
                         "or monotonically decreasing; repeated values are "
                         "allowed.")
    # Copy inputs: the Fortran wrapper may modify its arguments in place.
    t = copy(t)
    y0 = copy(y0)
    output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
                             full_output, rtol, atol, tcrit, h0, hmax, hmin,
                             ixpr, mxstep, mxhnil, mxordn, mxords,
                             int(bool(tfirst)))
    if output[-1] < 0:
        warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
        # stacklevel=2 attributes the warning to the caller of odeint
        # rather than to this internal line.
        warnings.warn(warning_msg, ODEintWarning, stacklevel=2)
    elif printmessg:
        warning_msg = _msgs[output[-1]]
        warnings.warn(warning_msg, ODEintWarning, stacklevel=2)
    if full_output:
        output[1]['message'] = _msgs[output[-1]]
    # Drop the status code; it is only used for the messages above.
    output = output[:-1]
    if len(output) == 1:
        return output[0]
    else:
        return output
| 10,769
| 40.264368
| 102
|
py
|
scipy
|
scipy-main/scipy/integrate/_quad_vec.py
|
import sys
import copy
import heapq
import collections
import functools
import numpy as np
from scipy._lib._util import MapWrapper, _FunctionWrapper
class LRUDict(collections.OrderedDict):
    """Bounded mapping that evicts its least-recently-written entry.

    Writing an existing key refreshes it (moves it to the "most recent"
    end); inserting a new key beyond ``max_size`` evicts the oldest one.
    """

    def __init__(self, max_size):
        # Cooperatively initialize the OrderedDict base class.
        super().__init__()
        self.__max_size = max_size

    def __setitem__(self, key, value):
        existing_key = (key in self)
        super().__setitem__(key, value)
        if existing_key:
            # Refresh recency on overwrite.
            self.move_to_end(key)
        elif len(self) > self.__max_size:
            # Evict the least-recently-set entry.
            self.popitem(last=False)

    def update(self, other):
        # Bulk update would bypass the size bound above; not needed here.
        raise NotImplementedError()
class SemiInfiniteFunc:
    """
    Argument transform from (start, +-oo) to (0, 1)
    """
    def __init__(self, func, start, infty):
        self._func = func
        self._start = start
        self._sgn = 1 if infty >= 0 else -1
        # Below this t the 1/t**2 factor in __call__ would overflow.
        self._tmin = sys.float_info.min**0.5

    def get_t(self, x):
        # Map an original-space point x to its transformed coordinate t.
        denom = self._sgn * (x - self._start) + 1
        if denom == 0:
            # Only possible when x lies outside the transformed range.
            return np.inf
        return 1 / denom

    def __call__(self, t):
        # Transformed integrand: f(x(t)) * |dx/dt| with dx/dt = 1/t**2.
        if t < self._tmin:
            return 0.0
        x = self._start + self._sgn * (1 - t) / t
        value = self._func(x)
        return self._sgn * (value / t) / t
class DoubleInfiniteFunc:
    """
    Argument transform from (-oo, oo) to (-1, 1)
    """
    def __init__(self, func):
        self._func = func
        # Below this |t| the 1/t**2 factor in __call__ would overflow.
        self._tmin = sys.float_info.min**0.5

    def get_t(self, x):
        # Map an original-space point x to its transformed coordinate t.
        sign = 1 if x >= 0 else -1
        return sign / (abs(x) + 1)

    def __call__(self, t):
        # Transformed integrand: f(x(t)) * |dx/dt| with dx/dt = 1/t**2.
        if abs(t) < self._tmin:
            return 0.0
        value = self._func((1 - abs(t)) / t)
        return (value / t) / t
def _max_norm(x):
return np.amax(abs(x))
def _get_sizeof(obj):
try:
return sys.getsizeof(obj)
except TypeError:
# occurs on pypy
if hasattr(obj, '__sizeof__'):
return int(obj.__sizeof__())
return 64
class _Bunch:
def __init__(self, **kwargs):
self.__keys = kwargs.keys()
self.__dict__.update(**kwargs)
def __repr__(self):
return "_Bunch({})".format(", ".join(f"{k}={repr(self.__dict__[k])}"
for k in self.__keys))
def quad_vec(f, a, b, epsabs=1e-200, epsrel=1e-8, norm='2', cache_size=100e6, limit=10000,
             workers=1, points=None, quadrature=None, full_output=False,
             *, args=()):
    r"""Adaptive integration of a vector-valued function.
    Parameters
    ----------
    f : callable
        Vector-valued function f(x) to integrate.
    a : float
        Initial point.
    b : float
        Final point.
    epsabs : float, optional
        Absolute tolerance.
    epsrel : float, optional
        Relative tolerance.
    norm : {'max', '2'}, optional
        Vector norm to use for error estimation.
    cache_size : int, optional
        Number of bytes to use for memoization.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    workers : int or map-like callable, optional
        If `workers` is an integer, part of the computation is done in
        parallel subdivided to this many tasks (using
        :class:`python:multiprocessing.pool.Pool`).
        Supply `-1` to use all cores available to the Process.
        Alternatively, supply a map-like callable, such as
        :meth:`python:multiprocessing.pool.Pool.map` for evaluating the
        population in parallel.
        This evaluation is carried out as ``workers(func, iterable)``.
    points : list, optional
        List of additional breakpoints.
    quadrature : {'gk21', 'gk15', 'trapezoid'}, optional
        Quadrature rule to use on subintervals.
        Options: 'gk21' (Gauss-Kronrod 21-point rule),
        'gk15' (Gauss-Kronrod 15-point rule),
        'trapezoid' (composite trapezoid rule).
        Default: 'gk21' for finite intervals and 'gk15' for (semi-)infinite
    full_output : bool, optional
        Return an additional ``info`` dictionary.
    args : tuple, optional
        Extra arguments to pass to function, if any.
        .. versionadded:: 1.8.0
    Returns
    -------
    res : {float, array-like}
        Estimate for the result
    err : float
        Error estimate for the result in the given norm
    info : dict
        Returned only when ``full_output=True``.
        Info dictionary. Is an object with the attributes:
            success : bool
                Whether integration reached target precision.
            status : int
                Indicator for convergence, success (0),
                failure (1), and failure due to rounding error (2).
            neval : int
                Number of function evaluations.
            intervals : ndarray, shape (num_intervals, 2)
                Start and end points of subdivision intervals.
            integrals : ndarray, shape (num_intervals, ...)
                Integral for each interval.
                Note that at most ``cache_size`` values are recorded,
                and the array may contains *nan* for missing items.
            errors : ndarray, shape (num_intervals,)
                Estimated integration error for each interval.
    Notes
    -----
    The algorithm mainly follows the implementation of QUADPACK's
    DQAG* algorithms, implementing global error control and adaptive
    subdivision.
    The algorithm here has some differences to the QUADPACK approach:
    Instead of subdividing one interval at a time, the algorithm
    subdivides N intervals with largest errors at once. This enables
    (partial) parallelization of the integration.
    The logic of subdividing "next largest" intervals first is then
    not implemented, and we rely on the above extension to avoid
    concentrating on "small" intervals only.
    The Wynn epsilon table extrapolation is not used (QUADPACK uses it
    for infinite intervals). This is because the algorithm here is
    supposed to work on vector-valued functions, in an user-specified
    norm, and the extension of the epsilon algorithm to this case does
    not appear to be widely agreed. For max-norm, using elementwise
    Wynn epsilon could be possible, but we do not do this here with
    the hope that the epsilon extrapolation is mainly useful in
    special cases.
    References
    ----------
    [1] R. Piessens, E. de Doncker, QUADPACK (1983).
    Examples
    --------
    We can compute integrations of a vector-valued function:
    >>> from scipy.integrate import quad_vec
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> alpha = np.linspace(0.0, 2.0, num=30)
    >>> f = lambda x: x**alpha
    >>> x0, x1 = 0, 2
    >>> y, err = quad_vec(f, x0, x1)
    >>> plt.plot(alpha, y)
    >>> plt.xlabel(r"$\alpha$")
    >>> plt.ylabel(r"$\int_{0}^{2} x^\alpha dx$")
    >>> plt.show()
    """
    a = float(a)
    b = float(b)
    if args:
        if not isinstance(args, tuple):
            args = (args,)
        # create a wrapped function to allow the use of map and Pool.map
        f = _FunctionWrapper(f, args)
    # Use simple transformations to deal with integrals over infinite
    # intervals.
    # Note: the recursive calls below use 'gk15' by default, matching the
    # documented default for (semi-)infinite intervals.
    kwargs = dict(epsabs=epsabs,
                  epsrel=epsrel,
                  norm=norm,
                  cache_size=cache_size,
                  limit=limit,
                  workers=workers,
                  points=points,
                  quadrature='gk15' if quadrature is None else quadrature,
                  full_output=full_output)
    if np.isfinite(a) and np.isinf(b):
        # (a, +oo): transform to (0, 1); user breakpoints are mapped too.
        f2 = SemiInfiniteFunc(f, start=a, infty=b)
        if points is not None:
            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
        return quad_vec(f2, 0, 1, **kwargs)
    elif np.isfinite(b) and np.isinf(a):
        # (-oo, b): integrate the reflected problem and negate the result.
        f2 = SemiInfiniteFunc(f, start=b, infty=a)
        if points is not None:
            kwargs['points'] = tuple(f2.get_t(xp) for xp in points)
        res = quad_vec(f2, 0, 1, **kwargs)
        return (-res[0],) + res[1:]
    elif np.isinf(a) and np.isinf(b):
        sgn = -1 if b < a else 1
        # NB. explicitly split integral at t=0, which separates
        # the positive and negative sides
        f2 = DoubleInfiniteFunc(f)
        if points is not None:
            kwargs['points'] = (0,) + tuple(f2.get_t(xp) for xp in points)
        else:
            kwargs['points'] = (0,)
        if a != b:
            res = quad_vec(f2, -1, 1, **kwargs)
        else:
            res = quad_vec(f2, 1, 1, **kwargs)
        return (res[0]*sgn,) + res[1:]
    elif not (np.isfinite(a) and np.isfinite(b)):
        raise ValueError(f"invalid integration bounds a={a}, b={b}")
    # Resolve the error norm; a user-supplied callable is used as-is.
    norm_funcs = {
        None: _max_norm,
        'max': _max_norm,
        '2': np.linalg.norm
    }
    if callable(norm):
        norm_func = norm
    else:
        norm_func = norm_funcs[norm]
    # At most this many intervals are subdivided per iteration (bounds the
    # size of a parallel batch).
    parallel_count = 128
    min_intervals = 2
    try:
        _quadrature = {None: _quadrature_gk21,
                       'gk21': _quadrature_gk21,
                       'gk15': _quadrature_gk15,
                       'trapz': _quadrature_trapezoid,  # alias for backcompat
                       'trapezoid': _quadrature_trapezoid}[quadrature]
    except KeyError as e:
        raise ValueError(f"unknown quadrature {quadrature!r}") from e
    # Initial interval set: split at user-supplied breakpoints inside (a, b).
    if points is None:
        initial_intervals = [(a, b)]
    else:
        prev = a
        initial_intervals = []
        for p in sorted(points):
            p = float(p)
            if not (a < p < b) or p == prev:
                continue
            initial_intervals.append((prev, p))
            prev = p
        initial_intervals.append((prev, b))
    global_integral = None
    global_error = None
    rounding_error = None
    interval_cache = None
    intervals = []
    neval = 0
    # First pass: integrate each initial interval, set up the result type,
    # the LRU cache of per-interval integrals, and the error heap.
    for x1, x2 in initial_intervals:
        ig, err, rnd = _quadrature(x1, x2, f, norm_func)
        neval += _quadrature.num_eval
        if global_integral is None:
            if isinstance(ig, (float, complex)):
                # Specialize for scalars
                if norm_func in (_max_norm, np.linalg.norm):
                    norm_func = abs
            global_integral = ig
            global_error = float(err)
            rounding_error = float(rnd)
            cache_count = cache_size // _get_sizeof(ig)
            interval_cache = LRUDict(cache_count)
        else:
            global_integral += ig
            global_error += err
            rounding_error += rnd
        interval_cache[(x1, x2)] = copy.copy(ig)
        # Heap keyed by negated error so heappop yields the worst interval.
        intervals.append((-err, x1, x2))
    heapq.heapify(intervals)
    CONVERGED = 0
    NOT_CONVERGED = 1
    ROUNDING_ERROR = 2
    NOT_A_NUMBER = 3
    status_msg = {
        CONVERGED: "Target precision reached.",
        NOT_CONVERGED: "Target precision not reached.",
        ROUNDING_ERROR: "Target precision could not be reached due to rounding error.",
        NOT_A_NUMBER: "Non-finite values encountered."
    }
    # Process intervals
    with MapWrapper(workers) as mapwrapper:
        ier = NOT_CONVERGED
        while intervals and len(intervals) < limit:
            # Select intervals with largest errors for subdivision
            tol = max(epsabs, epsrel*norm_func(global_integral))
            to_process = []
            err_sum = 0
            for j in range(parallel_count):
                if not intervals:
                    break
                if j > 0 and err_sum > global_error - tol/8:
                    # avoid unnecessary parallel splitting
                    break
                interval = heapq.heappop(intervals)
                # NOTE: this shadows the outer a/b; harmless, as they are
                # not used again after this point.
                neg_old_err, a, b = interval
                old_int = interval_cache.pop((a, b), None)
                to_process.append(((-neg_old_err, a, b, old_int), f, norm_func, _quadrature))
                err_sum += -neg_old_err
            # Subdivide intervals
            for dint, derr, dround_err, subint, dneval in mapwrapper(_subdivide_interval, to_process):
                neval += dneval
                global_integral += dint
                global_error += derr
                rounding_error += dround_err
                for x in subint:
                    x1, x2, ig, err = x
                    interval_cache[(x1, x2)] = ig
                    heapq.heappush(intervals, (-err, x1, x2))
            # Termination check
            if len(intervals) >= min_intervals:
                tol = max(epsabs, epsrel*norm_func(global_integral))
                if global_error < tol/8:
                    # Converged: total error comfortably below tolerance
                    # (factor-8 safety margin).
                    ier = CONVERGED
                    break
                if global_error < rounding_error:
                    ier = ROUNDING_ERROR
                    break
            if not (np.isfinite(global_error) and np.isfinite(rounding_error)):
                ier = NOT_A_NUMBER
                break
    res = global_integral
    err = global_error + rounding_error
    if full_output:
        res_arr = np.asarray(res)
        # Intervals evicted from the LRU cache are reported as NaN.
        dummy = np.full(res_arr.shape, np.nan, dtype=res_arr.dtype)
        integrals = np.array([interval_cache.get((z[1], z[2]), dummy)
                              for z in intervals], dtype=res_arr.dtype)
        errors = np.array([-z[0] for z in intervals])
        intervals = np.array([[z[1], z[2]] for z in intervals])
        info = _Bunch(neval=neval,
                      success=(ier == CONVERGED),
                      status=ier,
                      message=status_msg[ier],
                      intervals=intervals,
                      integrals=integrals,
                      errors=errors)
        return (res, err, info)
    else:
        return (res, err)
def _subdivide_interval(args):
interval, f, norm_func, _quadrature = args
old_err, a, b, old_int = interval
c = 0.5 * (a + b)
# Left-hand side
if getattr(_quadrature, 'cache_size', 0) > 0:
f = functools.lru_cache(_quadrature.cache_size)(f)
s1, err1, round1 = _quadrature(a, c, f, norm_func)
dneval = _quadrature.num_eval
s2, err2, round2 = _quadrature(c, b, f, norm_func)
dneval += _quadrature.num_eval
if old_int is None:
old_int, _, _ = _quadrature(a, b, f, norm_func)
dneval += _quadrature.num_eval
if getattr(_quadrature, 'cache_size', 0) > 0:
dneval = f.cache_info().misses
dint = s1 + s2 - old_int
derr = err1 + err2 - old_err
dround_err = round1 + round2
subintervals = ((a, c, s1, err1), (c, b, s2, err2))
return dint, derr, dround_err, subintervals, dneval
def _quadrature_trapezoid(x1, x2, f, norm_func):
"""
Composite trapezoid quadrature
"""
x3 = 0.5*(x1 + x2)
f1 = f(x1)
f2 = f(x2)
f3 = f(x3)
s2 = 0.25 * (x2 - x1) * (f1 + 2*f3 + f2)
round_err = 0.25 * abs(x2 - x1) * (float(norm_func(f1))
+ 2*float(norm_func(f3))
+ float(norm_func(f2))) * 2e-16
s1 = 0.5 * (x2 - x1) * (f1 + f2)
err = 1/3 * float(norm_func(s1 - s2))
return s2, err, round_err
_quadrature_trapezoid.cache_size = 3 * 3
_quadrature_trapezoid.num_eval = 3
def _quadrature_gk(a, b, f, norm_func, x, w, v):
"""
Generic Gauss-Kronrod quadrature
"""
fv = [0.0]*len(x)
c = 0.5 * (a + b)
h = 0.5 * (b - a)
# Gauss-Kronrod
s_k = 0.0
s_k_abs = 0.0
for i in range(len(x)):
ff = f(c + h*x[i])
fv[i] = ff
vv = v[i]
# \int f(x)
s_k += vv * ff
# \int |f(x)|
s_k_abs += vv * abs(ff)
# Gauss
s_g = 0.0
for i in range(len(w)):
s_g += w[i] * fv[2*i + 1]
# Quadrature of abs-deviation from average
s_k_dabs = 0.0
y0 = s_k / 2.0
for i in range(len(x)):
# \int |f(x) - y0|
s_k_dabs += v[i] * abs(fv[i] - y0)
# Use similar error estimation as quadpack
err = float(norm_func((s_k - s_g) * h))
dabs = float(norm_func(s_k_dabs * h))
if dabs != 0 and err != 0:
err = dabs * min(1.0, (200 * err / dabs)**1.5)
eps = sys.float_info.epsilon
round_err = float(norm_func(50 * eps * h * s_k_abs))
if round_err > sys.float_info.min:
err = max(err, round_err)
return h * s_k, err, round_err
def _quadrature_gk21(a, b, f, norm_func):
    """
    Gauss-Kronrod 21 quadrature with error estimate

    Evaluates ``f`` at 21 Kronrod nodes on ``[a, b]``; the embedded
    10-point Gauss rule (every other node) supplies the error estimate.
    """
    # Gauss-Kronrod points
    x = (0.995657163025808080735527280689003,
         0.973906528517171720077964012084452,
         0.930157491355708226001207180059508,
         0.865063366688984510732096688423493,
         0.780817726586416897063717578345042,
         0.679409568299024406234327365114874,
         0.562757134668604683339000099272694,
         0.433395394129247190799265943165784,
         0.294392862701460198131126603103866,
         0.148874338981631210884826001129720,
         0,
         -0.148874338981631210884826001129720,
         -0.294392862701460198131126603103866,
         -0.433395394129247190799265943165784,
         -0.562757134668604683339000099272694,
         -0.679409568299024406234327365114874,
         -0.780817726586416897063717578345042,
         -0.865063366688984510732096688423493,
         -0.930157491355708226001207180059508,
         -0.973906528517171720077964012084452,
         -0.995657163025808080735527280689003)
    # 10-point weights
    w = (0.066671344308688137593568809893332,
         0.149451349150580593145776339657697,
         0.219086362515982043995534934228163,
         0.269266719309996355091226921569469,
         0.295524224714752870173892994651338,
         0.295524224714752870173892994651338,
         0.269266719309996355091226921569469,
         0.219086362515982043995534934228163,
         0.149451349150580593145776339657697,
         0.066671344308688137593568809893332)
    # 21-point weights
    v = (0.011694638867371874278064396062192,
         0.032558162307964727478818972459390,
         0.054755896574351996031381300244580,
         0.075039674810919952767043140916190,
         0.093125454583697605535065465083366,
         0.109387158802297641899210590325805,
         0.123491976262065851077958109831074,
         0.134709217311473325928054001771707,
         0.142775938577060080797094273138717,
         0.147739104901338491374841515972068,
         0.149445554002916905664936468389821,
         0.147739104901338491374841515972068,
         0.142775938577060080797094273138717,
         0.134709217311473325928054001771707,
         0.123491976262065851077958109831074,
         0.109387158802297641899210590325805,
         0.093125454583697605535065465083366,
         0.075039674810919952767043140916190,
         0.054755896574351996031381300244580,
         0.032558162307964727478818972459390,
         0.011694638867371874278064396062192)
    return _quadrature_gk(a, b, f, norm_func, x, w, v)
# Number of f evaluations per call, used for the neval bookkeeping.
_quadrature_gk21.num_eval = 21
def _quadrature_gk15(a, b, f, norm_func):
    """
    Gauss-Kronrod 15 quadrature with error estimate

    Evaluates ``f`` at 15 Kronrod nodes on ``[a, b]``; the embedded
    7-point Gauss rule (every other node) supplies the error estimate.
    """
    # Gauss-Kronrod points
    x = (0.991455371120812639206854697526329,
         0.949107912342758524526189684047851,
         0.864864423359769072789712788640926,
         0.741531185599394439863864773280788,
         0.586087235467691130294144838258730,
         0.405845151377397166906606412076961,
         0.207784955007898467600689403773245,
         0.000000000000000000000000000000000,
         -0.207784955007898467600689403773245,
         -0.405845151377397166906606412076961,
         -0.586087235467691130294144838258730,
         -0.741531185599394439863864773280788,
         -0.864864423359769072789712788640926,
         -0.949107912342758524526189684047851,
         -0.991455371120812639206854697526329)
    # 7-point weights
    w = (0.129484966168869693270611432679082,
         0.279705391489276667901467771423780,
         0.381830050505118944950369775488975,
         0.417959183673469387755102040816327,
         0.381830050505118944950369775488975,
         0.279705391489276667901467771423780,
         0.129484966168869693270611432679082)
    # 15-point weights
    v = (0.022935322010529224963732008058970,
         0.063092092629978553290700663189204,
         0.104790010322250183839876322541518,
         0.140653259715525918745189590510238,
         0.169004726639267902826583426598550,
         0.190350578064785409913256402421014,
         0.204432940075298892414161999234649,
         0.209482141084727828012999174891714,
         0.204432940075298892414161999234649,
         0.190350578064785409913256402421014,
         0.169004726639267902826583426598550,
         0.140653259715525918745189590510238,
         0.104790010322250183839876322541518,
         0.063092092629978553290700663189204,
         0.022935322010529224963732008058970)
    return _quadrature_gk(a, b, f, norm_func, x, w, v)
# Number of f evaluations per call, used for the neval bookkeeping.
_quadrature_gk15.num_eval = 15
| 21,166
| 31.365443
| 102
|
py
|
scipy
|
scipy-main/scipy/integrate/_ode.py
|
# Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz.
# To wrap cvode to Python, one must write the extension module by
# hand. Its interface is too much 'advanced C' that using f2py
# would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
# if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
__all__ = ['ode', 'complex_ode']
import re
import warnings
from numpy import asarray, array, zeros, isscalar, real, imag, vstack
from . import _vode
from . import _dop
from . import _lsoda
# Integer dtype expected by each wrapped Fortran extension module
# (taken from the f2py-generated ``types.intvar`` of each module).
_dop_int_dtype = _dop.types.intvar.dtype
_vode_int_dtype = _vode.types.intvar.dtype
_lsoda_int_dtype = _lsoda.types.intvar.dtype
# ------------------------------------------------------------------------------
# User interface
# ------------------------------------------------------------------------------
class ode:
"""
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
*Note*: The first two arguments of ``f(t, y, ...)`` are in the
opposite order of the arguments in the system definition function used
by `scipy.integrate.odeint`.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Right-hand side of the differential equation. t is a scalar,
``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
`f` should return a scalar, array or list (not a tuple).
jac : callable ``jac(t, y, *jac_args)``, optional
Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
dimension of the matrix must be (lband+uband+1, len(y)).
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
This option is only considered when the user has not supplied a
Jacobian function and has not indicated (by setting either band)
that the Jacobian is banded. In this case, `with_jacobian` specifies
whether the iteration method of the ODE solver's correction step is
chord iteration with an internally generated full Jacobian or
functional iteration with no Jacobian.
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"lsoda"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
automatic method switching between implicit Adams method (for non-stiff
problems) and a method based on backward differentiation formulas (BDF)
(for stiff problems).
Source: http://www.netlib.org/odepack
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "lsoda" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j].
- with_jacobian : bool
*Not used.*
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- max_order_ns : int
Maximum order used in the nonstiff case (default 12).
- max_order_s : int
Maximum order used in the stiff case (default 5).
- max_hnil : int
Maximum number of messages reporting too small step size (t + h = t)
(default 0)
- ixpr : int
Whether to generate extra printing at method switches (default False).
"dopri5"
This is an explicit runge-kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
- verbosity : int
Switch for printing messages (< 0 for no messages).
"dop853"
This is an explicit runge-kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
>>> def f(t, y, arg1):
... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
>>> def jac(t, y, arg1):
... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf')
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
... print(r.t+dt, r.integrate(r.t+dt))
1 [-0.71038232+0.23749653j 0.40000271+0.j ]
2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
6.0 [0.58643071+0.339819j 0.08000018+0.j ]
7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
    """Store the right-hand side callable and optional Jacobian.

    Parameters
    ----------
    f : callable ``f(t, y, *f_params)``
        Right-hand side of the equation system.
    jac : callable ``jac(t, y, *jac_params)``, optional
        Jacobian of the right-hand side.
    """
    # User-supplied callables.
    self.f = f
    self.jac = jac
    # Extra arguments forwarded to f / jac (set via set_f_params /
    # set_jac_params).
    self.f_params = ()
    self.jac_params = ()
    # Solution vector; empty until set_initial_value() is called.
    self._y = []
    self.stiff = 0
@property
def y(self):
    # Read-only view of the current solution vector, as set by
    # set_initial_value() and updated by integrate().
    return self._y
def set_initial_value(self, y, t=0.0):
    """Set initial conditions y(t) = y."""
    # Accept a bare scalar as a one-component system.
    if isscalar(y):
        y = [y]
    n_prev = len(self._y)
    if not n_prev:
        # No integrator selected yet; '' matches the first available one.
        self.set_integrator('')  # find first available integrator
    # Cast to the scalar type the integrator works in (float for the
    # real-valued solvers, complex for zvode).
    self._y = asarray(y, self._integrator.scalar)
    self.t = t
    # Re-allocate the integrator workspaces for the (possibly new) size.
    self._integrator.reset(len(self._y), self.jac is not None)
    return self
def set_integrator(self, name, **integrator_params):
    """
    Set integrator by name.

    Parameters
    ----------
    name : str
        Name of the integrator (matched case-insensitively against the
        registered integrator class names).
    **integrator_params
        Additional parameters for the integrator.
    """
    integrator = find_integrator(name)
    if integrator is None:
        # FIXME: this really should be raise an exception. Will that break
        # any code?
        warnings.warn('No integrator name match with %r or is not '
                      'available.' % name)
    else:
        self._integrator = integrator(**integrator_params)
        if not len(self._y):
            # No initial condition yet: start from a one-component zero
            # state at t=0 so the integrator can still be configured.
            self.t = 0.0
            self._y = array([0.0], self._integrator.scalar)
        # Allocate the integrator workspaces for the current system size.
        self._integrator.reset(len(self._y), self.jac is not None)
    return self
def integrate(self, t, step=False, relax=False):
    """Find y=y(t), set y as an initial condition, and return y.

    Parameters
    ----------
    t : float
        The endpoint of the integration step.
    step : bool
        If True, and if the integrator supports the step method,
        then perform a single integration step and return.
        This parameter is provided in order to expose internals of
        the implementation, and should not be changed from its default
        value in most cases.
    relax : bool
        If True and if the integrator supports the run_relax method,
        then integrate until t_1 >= t and return. ``relax`` is not
        referenced if ``step=True``.
        This parameter is provided in order to expose internals of
        the implementation, and should not be changed from its default
        value in most cases.

    Returns
    -------
    y : float
        The integrated value at t
    """
    # Pick the most specific driver the integrator supports; plain run()
    # is the fallback.
    if step and self._integrator.supports_step:
        mth = self._integrator.step
    elif relax and self._integrator.supports_run_relax:
        mth = self._integrator.run_relax
    else:
        mth = self._integrator.run

    try:
        # NOTE(review): the zero-argument lambda is only a placeholder
        # passed when no Jacobian was supplied; presumably the selected
        # integrator never calls it in that case -- confirm before
        # changing.
        self._y, self.t = mth(self.f, self.jac or (lambda: None),
                              self._y, self.t, t,
                              self.f_params, self.jac_params)
    except SystemError as e:
        # f2py issue with tuple returns, see ticket 1187.
        raise ValueError(
            'Function to integrate must not return a tuple.'
        ) from e

    return self._y
def successful(self):
    """Check if integration was successful."""
    # Lazily select a default integrator when none was chosen yet, so
    # that `_integrator.success` is always available.
    if not hasattr(self, '_integrator'):
        self.set_integrator('')
    return self._integrator.success == 1
def get_return_code(self):
    """Extracts the return code for the integration to enable better control
    if the integration fails.

    In general, a return code > 0 implies success, while a return code < 0
    implies failure.

    Notes
    -----
    This section describes possible return codes and their meaning, for available
    integrators that can be selected by `set_integrator` method.

    "vode"

    =========== =======
    Return Code Message
    =========== =======
    2           Integration successful.
    -1          Excess work done on this call. (Perhaps wrong MF.)
    -2          Excess accuracy requested. (Tolerances too small.)
    -3          Illegal input detected. (See printed message.)
    -4          Repeated error test failures. (Check all input.)
    -5          Repeated convergence failures. (Perhaps bad Jacobian
                supplied or wrong choice of MF or tolerances.)
    -6          Error weight became zero during problem. (Solution
                component i vanished, and ATOL or ATOL(i) = 0.)
    =========== =======

    "zvode"

    =========== =======
    Return Code Message
    =========== =======
    2           Integration successful.
    -1          Excess work done on this call. (Perhaps wrong MF.)
    -2          Excess accuracy requested. (Tolerances too small.)
    -3          Illegal input detected. (See printed message.)
    -4          Repeated error test failures. (Check all input.)
    -5          Repeated convergence failures. (Perhaps bad Jacobian
                supplied or wrong choice of MF or tolerances.)
    -6          Error weight became zero during problem. (Solution
                component i vanished, and ATOL or ATOL(i) = 0.)
    =========== =======

    "dopri5"

    =========== =======
    Return Code Message
    =========== =======
    1           Integration successful.
    2           Integration successful (interrupted by solout).
    -1          Input is not consistent.
    -2          Larger nsteps is needed.
    -3          Step size becomes too small.
    -4          Problem is probably stiff (interrupted).
    =========== =======

    "dop853"

    =========== =======
    Return Code Message
    =========== =======
    1           Integration successful.
    2           Integration successful (interrupted by solout).
    -1          Input is not consistent.
    -2          Larger nsteps is needed.
    -3          Step size becomes too small.
    -4          Problem is probably stiff (interrupted).
    =========== =======

    "lsoda"

    =========== =======
    Return Code Message
    =========== =======
    2           Integration successful.
    -1          Excess work done on this call (perhaps wrong Dfun type).
    -2          Excess accuracy requested (tolerances too small).
    -3          Illegal input detected (internal error).
    -4          Repeated error test failures (internal error).
    -5          Repeated convergence failures (perhaps bad Jacobian or tolerances).
    -6          Error weight became zero during problem.
    -7          Internal workspace insufficient to finish (internal error).
    =========== =======
    """
    try:
        self._integrator
    except AttributeError:
        # No integrator chosen yet; pick the first available one so that
        # an `istate` attribute exists.
        self.set_integrator('')
    return self._integrator.istate
def set_f_params(self, *args):
    """Store extra positional arguments to be forwarded to f.

    Returns self so that configuration calls can be chained.
    """
    self.f_params = args
    return self
def set_jac_params(self, *args):
    """Store extra positional arguments to be forwarded to jac.

    Returns self so that configuration calls can be chained.
    """
    self.jac_params = args
    return self
def set_solout(self, solout):
    """
    Set callable to be called at every successful integration step.

    Parameters
    ----------
    solout : callable
        ``solout(t, y)`` is called at each internal integrator step,
        t is a scalar providing the current independent position
        y is the current solution ``y.shape == (n,)``
        solout should return -1 to stop integration
        otherwise it should return None or 0
    """
    # Guard clause: fail fast for integrators without solout support.
    if not self._integrator.supports_solout:
        raise ValueError("selected integrator does not support solout,"
                         " choose another one")
    self._integrator.set_solout(solout)
    if self._y is not None:
        # Re-allocate workspaces so the new callback takes effect.
        self._integrator.reset(len(self._y), self.jac is not None)
def _transform_banded_jac(bjac):
    """
    Convert a real matrix of the form (for example)

        [0 0 A B]        [0 0 0 B]
        [0 0 C D]        [0 0 A D]
        [E F G H]  to    [0 F C H]
        [I J K L]        [E J G L]
                         [I 0 K 0]

    That is, every other column is shifted up one.
    """
    nrows, ncols = bjac.shape
    shifted = zeros((nrows + 1, ncols))
    # Even-indexed columns move down one row relative to the odd ones
    # (equivalently, odd columns are shifted up one).
    shifted[1:, ::2] = bjac[:, ::2]
    shifted[:-1, 1::2] = bjac[:, 1::2]
    return shifted
class complex_ode(ode):
    """
    A wrapper of ode for complex systems.

    This functions similarly as `ode`, but re-maps a complex-valued
    equation system to a real-valued one before using the integrators.

    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
        ``f_args`` is set by calling ``set_f_params(*args)``.
    jac : callable ``jac(t, y, *jac_args)``
        Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_f_params(*args)``.

    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.

    Examples
    --------
    For usage examples, see `ode`.
    """

    def __init__(self, f, jac=None):
        # Keep the user's complex-valued callables; the base class is set
        # up with real-valued wrappers operating on the interleaved
        # [re0, im0, re1, im1, ...] representation.
        self.cf = f
        self.cjac = jac
        if jac is None:
            ode.__init__(self, self._wrap, None)
        else:
            ode.__init__(self, self._wrap, self._wrap_jac)

    def _wrap(self, t, y, *f_args):
        # Rebuild the complex vector, evaluate the user's RHS, then
        # interleave the result back into the reusable buffer self.tmp.
        f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
        # self.tmp is a real-valued array containing the interleaved
        # real and imaginary parts of f.
        self.tmp[::2] = real(f)
        self.tmp[1::2] = imag(f)
        return self.tmp

    def _wrap_jac(self, t, y, *jac_args):
        # jac is the complex Jacobian computed by the user-defined function.
        jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))

        # jac_tmp is the real version of the complex Jacobian. Each complex
        # entry in jac, say 2+3j, becomes a 2x2 block of the form
        #     [2 -3]
        #     [3  2]
        jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
        jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
        jac_tmp[1::2, ::2] = imag(jac)
        jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]

        ml = getattr(self._integrator, 'ml', None)
        mu = getattr(self._integrator, 'mu', None)
        if ml is not None or mu is not None:
            # Jacobian is banded. The user's Jacobian function has computed
            # the complex Jacobian in packed format. The corresponding
            # real-valued version has every other column shifted up.
            jac_tmp = _transform_banded_jac(jac_tmp)

        return jac_tmp

    @property
    def y(self):
        # Reassemble the complex solution from the interleaved real storage.
        return self._y[::2] + 1j * self._y[1::2]

    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.

        Parameters
        ----------
        name : str
            Name of the integrator
        **integrator_params
            Additional parameters for the integrator.
        """
        if name == 'zvode':
            raise ValueError("zvode must be used with ode, not complex_ode")

        lband = integrator_params.get('lband')
        uband = integrator_params.get('uband')
        if lband is not None or uband is not None:
            # The Jacobian is banded. Override the user-supplied bandwidths
            # (which are for the complex Jacobian) with the bandwidths of
            # the corresponding real-valued Jacobian wrapper of the complex
            # Jacobian.
            integrator_params['lband'] = 2 * (lband or 0) + 1
            integrator_params['uband'] = 2 * (uband or 0) + 1

        return ode.set_integrator(self, name, **integrator_params)

    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        y = asarray(y)
        # Interleave real/imag parts into a real buffer of twice the size.
        self.tmp = zeros(y.size * 2, 'float')
        self.tmp[::2] = real(y)
        self.tmp[1::2] = imag(y)
        return ode.set_initial_value(self, self.tmp, t)

    def integrate(self, t, step=False, relax=False):
        """Find y=y(t), set y as an initial condition, and return y.

        Parameters
        ----------
        t : float
            The endpoint of the integration step.
        step : bool
            If True, and if the integrator supports the step method,
            then perform a single integration step and return.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.
        relax : bool
            If True and if the integrator supports the run_relax method,
            then integrate until t_1 >= t and return. ``relax`` is not
            referenced if ``step=True``.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.

        Returns
        -------
        y : float
            The integrated value at t
        """
        y = ode.integrate(self, t, step, relax)
        return y[::2] + 1j * y[1::2]

    def set_solout(self, solout):
        """
        Set callable to be called at every successful integration step.

        Parameters
        ----------
        solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
            solout should return -1 to stop integration
            otherwise it should return None or 0
        """
        if self._integrator.supports_solout:
            # complex=True makes the integrator recombine the interleaved
            # real representation into a complex y before the callback.
            self._integrator.set_solout(solout, complex=True)
        else:
            # BUG FIX: the message previously read "solouta," immediately
            # followed by "choose" (typo plus missing space).
            # NOTE(review): `ode.set_solout` raises ValueError for the same
            # condition; TypeError is kept here so existing callers that
            # catch TypeError keep working.
            raise TypeError("selected integrator does not support solout,"
                            " choose another one")
# ------------------------------------------------------------------------------
# ODE integrators
# ------------------------------------------------------------------------------
def find_integrator(name):
    """Return the first registered integrator class matching `name`.

    `name` is used as a case-insensitive regular expression matched
    against each class name; None is returned when nothing matches.
    """
    return next(
        (cls for cls in IntegratorBase.integrator_classes
         if re.match(name, cls.__name__, re.I)),
        None,
    )
class IntegratorConcurrencyError(RuntimeError):
    """
    Failure due to concurrent usage of an integrator that can be used
    only for a single problem at a time.
    """

    def __init__(self, name):
        super().__init__(
            f"Integrator `{name}` can be used to solve only a single problem "
            "at a time. If you want to integrate multiple problems, "
            "consider using a different integrator "
            "(see `ode.set_integrator`)"
        )
class IntegratorBase:
    """Abstract base class for the low-level ODE integrator wrappers."""

    # runner is None => integrator is not available
    runner = None
    # success == 1 if the integrator was called successfully
    success = None
    # istate > 0 means success, istate < 0 means failure
    istate = None
    supports_run_relax = None
    supports_step = None
    supports_solout = False
    # Registry of usable integrator classes, filled at import time.
    integrator_classes = []
    # Scalar type of the solution vector (complex for zvode).
    scalar = float

    def acquire_new_handle(self):
        # Some of the integrators have internal state (ancient
        # Fortran...), and so only one instance can use them at a time.
        # We keep track of this, and fail when concurrent usage is tried.
        self.__class__.active_global_handle += 1
        self.handle = self.__class__.active_global_handle

    def check_handle(self):
        # Fail if another instance has acquired the solver since this
        # one last ran.
        if self.handle is not self.__class__.active_global_handle:
            raise IntegratorConcurrencyError(self.__class__.__name__)

    def reset(self, n, has_jac):
        """Prepare integrator for call: allocate memory, set flags, etc.

        n - number of equations.
        has_jac - if user has supplied function for evaluating Jacobian.
        """

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t=t1 using y0 as an initial condition.

        Return 2-tuple (y1,t1) where y1 is the result and t=t1
        defines the stoppage coordinate of the result.
        """
        raise NotImplementedError('all integrators must define '
                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')

    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Make one integration step and return (y1,t1)."""
        raise NotImplementedError(
            f'{self.__class__.__name__} does not support step() method')

    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
        raise NotImplementedError(
            f'{self.__class__.__name__} does not support run_relax() method')

    # XXX: __str__ method for getting visual state of the integrator
def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
    """
    Wrap a banded Jacobian function with a function that pads
    the Jacobian with `ml` rows of zeros.
    """
    def padded_jac(t, y):
        # Evaluate the user's banded Jacobian, then append `ml` all-zero
        # rows as required by the f2py-generated dvode wrapper.
        band = asarray(jacfunc(t, y, *jac_params))
        return vstack((band, zeros((ml, band.shape[1]))))

    return padded_jac
class vode(IntegratorBase):
    """Wrapper of the real-valued Fortran VODE solver (variable-coefficient
    Adams / BDF linear multistep methods).
    """

    # f2py-generated runner; None when the Fortran extension is unavailable.
    runner = getattr(_vode, 'dvode', None)

    # Meanings of the negative ISTATE values returned by dvode.
    messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
                -2: 'Excess accuracy requested. (Tolerances too small.)',
                -3: 'Illegal input detected. (See printed message.)',
                -4: 'Repeated error test failures. (Check all input.)',
                -5: 'Repeated convergence failures. (Perhaps bad'
                    ' Jacobian supplied or wrong choice of MF or tolerances.)',
                -6: 'Error weight became zero during problem. (Solution'
                    ' component i vanished, and ATOL or ATOL(i) = 0.)'
                }
    supports_run_relax = 1
    supports_step = 1
    active_global_handle = 0

    def __init__(self,
                 method='adams',
                 with_jacobian=False,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 order=12,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ):
        # NOTE(review): the pattern/string arguments below are reversed
        # relative to usual re.match(pattern, string) usage -- the user's
        # `method` string acts as the pattern. Kept as-is for backward
        # compatibility.
        if re.match(method, r'adams', re.I):
            self.meth = 1
        elif re.match(method, r'bdf', re.I):
            self.meth = 2
        else:
            raise ValueError('Unknown integration method %s' % method)
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        # Upper/lower Jacobian bandwidths; None means a full Jacobian.
        self.mu = uband
        self.ml = lband

        self.order = order
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.success = 1

        self.initialized = False

    def _determine_mf_and_set_bands(self, has_jac):
        """
        Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.

        In the Fortran code, the legal values of `MF` are:
            10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
            -11, -12, -14, -15, -21, -22, -24, -25
        but this Python wrapper does not use negative values.

        Returns

            mf = 10*self.meth + miter

        self.meth is the linear multistep method:
            self.meth == 1: method="adams"
            self.meth == 2: method="bdf"

        miter is the correction iteration method:
            miter == 0: Functional iteration; no Jacobian involved.
            miter == 1: Chord iteration with user-supplied full Jacobian.
            miter == 2: Chord iteration with internally computed full Jacobian.
            miter == 3: Chord iteration with internally computed diagonal Jacobian.
            miter == 4: Chord iteration with user-supplied banded Jacobian.
            miter == 5: Chord iteration with internally computed banded Jacobian.

        Side effects: If either self.mu or self.ml is not None and the other is None,
        then the one that is None is set to 0.
        """
        jac_is_banded = self.mu is not None or self.ml is not None
        if jac_is_banded:
            if self.mu is None:
                self.mu = 0
            if self.ml is None:
                self.ml = 0

        # has_jac is True if the user provided a Jacobian function.
        if has_jac:
            if jac_is_banded:
                miter = 4
            else:
                miter = 1
        else:
            if jac_is_banded:
                if self.ml == self.mu == 0:
                    miter = 3  # Chord iteration with internal diagonal Jacobian.
                else:
                    miter = 5  # Chord iteration with internal banded Jacobian.
            else:
                # self.with_jacobian is set by the user in the call to ode.set_integrator.
                if self.with_jacobian:
                    miter = 2  # Chord iteration with internal full Jacobian.
                else:
                    miter = 0  # Functional iteration; no Jacobian involved.

        mf = 10 * self.meth + miter
        return mf

    def reset(self, n, has_jac):
        # Compute the method flag and the real/integer workspace sizes
        # LRW/LIW required by dvode for this mf (per dvode.f documentation).
        mf = self._determine_mf_and_set_bands(has_jac)

        if mf == 10:
            lrw = 20 + 16 * n
        elif mf in [11, 12]:
            lrw = 22 + 16 * n + 2 * n * n
        elif mf == 13:
            lrw = 22 + 17 * n
        elif mf in [14, 15]:
            lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf == 20:
            lrw = 20 + 9 * n
        elif mf in [21, 22]:
            lrw = 22 + 9 * n + 2 * n * n
        elif mf == 23:
            lrw = 22 + 10 * n
        elif mf in [24, 25]:
            lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
        else:
            raise ValueError('Unexpected mf=%s' % mf)

        # miter 0 and 3 need no Jacobian storage in the integer workspace.
        if mf % 10 in [0, 3]:
            liw = 30
        else:
            liw = 30 + n

        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork

        iwork = zeros((liw,), _vode_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork

        # Positional arguments for the runner after (f, jac, y0, t0, t1);
        # the two 1's are the ITOL-style and ITASK/ISTATE-style flags.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # Enforce single-problem usage of the stateful Fortran code.
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()

        if self.ml is not None and self.ml > 0:
            # Banded Jacobian. Wrap the user-provided function with one
            # that pads the Jacobian array with the extra `self.ml` rows
            # required by the f2py-generated wrapper.
            jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)

        args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
                (f_params, jac_params))
        y1, t, istate = self.runner(*args)
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = f'Unexpected istate={istate:d}'
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
            self.istate = 2
        return y1, t

    def step(self, *args):
        # Temporarily set the third runner flag to 2 (single step, per
        # the ODEPACK ITASK convention), then restore it.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self, *args):
        # Temporarily set the third runner flag to 3 (stop at the first
        # internal mesh point at or beyond t1), then restore it.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
# Register vode with the generic dispatcher only when the compiled
# Fortran runner is actually available.
if vode.runner is not None:
    IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
    """Wrapper of the Fortran ZVODE solver: the complex-valued variant of
    VODE. All method/step-size options are inherited from `vode`.
    """

    runner = getattr(_vode, 'zvode', None)

    supports_run_relax = 1
    supports_step = 1
    scalar = complex
    active_global_handle = 0

    def reset(self, n, has_jac):
        """Allocate ZVODE workspaces and build the runner argument list.

        Parameters
        ----------
        n : int
            Number of equations.
        has_jac : bool
            True when the user supplied a Jacobian function.
        """
        mf = self._determine_mf_and_set_bands(has_jac)

        # Complex workspace length LZW for each method flag, following the
        # requirements documented in zvode.f. The negative flags are
        # listed for completeness, although _determine_mf_and_set_bands
        # only produces positive values.
        if mf in (10,):
            lzw = 15 * n
        elif mf in (11, 12):
            lzw = 15 * n + 2 * n ** 2
        elif mf in (-11, -12):
            lzw = 15 * n + n ** 2
        elif mf in (13,):
            lzw = 16 * n
        elif mf in (14, 15):
            lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-14, -15):
            lzw = 16 * n + (2 * self.ml + self.mu) * n
        elif mf in (20,):
            lzw = 8 * n
        elif mf in (21, 22):
            lzw = 8 * n + 2 * n ** 2
        elif mf in (-21, -22):
            lzw = 8 * n + n ** 2
        elif mf in (23,):
            lzw = 9 * n
        elif mf in (24, 25):
            lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-24, -25):
            lzw = 9 * n + (2 * self.ml + self.mu) * n
        else:
            # BUG FIX: an unrecognized mf previously fell through and
            # produced a confusing NameError on `lzw`; fail explicitly,
            # mirroring vode.reset.
            raise ValueError('Unexpected mf=%s' % mf)

        lrw = 20 + n

        # miter 0 and 3 need no Jacobian storage in the integer workspace.
        if mf % 10 in (0, 3):
            liw = 30
        else:
            liw = 30 + n

        zwork = zeros((lzw,), complex)
        self.zwork = zwork

        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork

        iwork = zeros((liw,), _vode_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork

        # Positional arguments for the runner after (f, jac, y0, t0, t1).
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.zwork, self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False
# Register zvode only when the compiled Fortran runner is available.
if zvode.runner is not None:
    IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
    """Wrapper of the Hairer & Wanner DOPRI5 explicit Runge-Kutta 5(4)
    Fortran code with step-size control and dense output.
    """

    runner = getattr(_dop, 'dopri5', None)
    name = 'dopri5'
    supports_solout = True

    # Meanings of the IDID status codes returned by the Fortran code.
    messages = {1: 'computation successful',
                2: 'computation successful (interrupted by solout)',
                -1: 'input is not consistent',
                -2: 'larger nsteps is needed',
                -3: 'step size becomes too small',
                -4: 'problem is probably stiff (interrupted)',
                }

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 beta=0.0,
                 method=None,
                 verbosity=-1,  # no messages if negative
                 ):
        # `method` is accepted (and ignored) for signature compatibility
        # with the other integrator classes.
        self.rtol = rtol
        self.atol = atol
        self.nsteps = nsteps
        self.max_step = max_step
        self.first_step = first_step
        self.safety = safety
        self.ifactor = ifactor
        self.dfactor = dfactor
        self.beta = beta
        self.verbosity = verbosity
        self.success = 1
        self.set_solout(None)

    def set_solout(self, solout, complex=False):
        # Store the per-step callback; `complex` marks that y is the
        # interleaved real representation of a complex system and must be
        # recombined before invoking the callback.
        self.solout = solout
        self.solout_cmplx = complex
        # iout tells the Fortran code whether to invoke solout at all.
        if solout is None:
            self.iout = 0
        else:
            self.iout = 1

    def reset(self, n, has_jac):
        # WORK array layout follows dopri5.f: slots 1..6 hold the
        # step-size control tuning parameters.
        work = zeros((8 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), _dop_int_dtype)
        iwork[0] = self.nsteps
        iwork[2] = self.verbosity
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.iout, self.work, self.iwork]
        self.success = 1

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # jac and jac_params are ignored: DOPRI5 is an explicit method.
        x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
                                            tuple(self.call_args) + (f_params,)))
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = f'Unexpected istate={istate:d}'
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)))
            self.success = 0
        return y, x

    def _solout(self, nr, xold, x, y, nd, icomp, con):
        # Adapter between the Fortran solout callback signature and the
        # user-supplied solout(t, y).
        if self.solout is not None:
            if self.solout_cmplx:
                # De-interleave the real representation back to complex.
                y = y[::2] + 1j * y[1::2]
            return self.solout(x, y)
        else:
            return 1
# Register dopri5 only when the compiled Fortran runner is available.
if dopri5.runner is not None:
    IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
    """Wrapper of the Hairer & Wanner DOP853 explicit Runge-Kutta 8(5,3)
    Fortran code. Shares the option handling of `dopri5`; only the
    default step-factor bounds and the WORK array size differ.
    """

    runner = getattr(_dop, 'dop853', None)
    name = 'dop853'

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=6.0,
                 dfactor=0.3,
                 beta=0.0,
                 method=None,
                 verbosity=-1,  # no messages if negative
                 ):
        super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
                         ifactor, dfactor, beta, method, verbosity)

    def reset(self, n, has_jac):
        # Same layout as dopri5.reset, but DOP853 needs a larger WORK
        # array: 11*n + 21 instead of 8*n + 21.
        work = zeros((11 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), _dop_int_dtype)
        iwork[0] = self.nsteps
        iwork[2] = self.verbosity
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.iout, self.work, self.iwork]
        self.success = 1
# Register dop853 only when the compiled Fortran runner is available.
if dop853.runner is not None:
    IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
    """Wrapper of the ODEPACK LSODA solver, which switches automatically
    between nonstiff (Adams) and stiff (BDF) methods.
    """

    runner = getattr(_lsoda, 'lsoda', None)
    active_global_handle = 0

    # Meanings of the ISTATE values returned by the Fortran code.
    messages = {
        2: "Integration successful.",
        -1: "Excess work done on this call (perhaps wrong Dfun type).",
        -2: "Excess accuracy requested (tolerances too small).",
        -3: "Illegal input detected (internal error).",
        -4: "Repeated error test failures (internal error).",
        -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
        -6: "Error weight became zero during problem.",
        -7: "Internal workspace insufficient to finish (internal error)."
    }

    def __init__(self,
                 with_jacobian=False,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ixpr=0,
                 max_hnil=0,
                 max_order_ns=12,
                 max_order_s=5,
                 method=None
                 ):
        # `method` is accepted (and ignored) for signature compatibility;
        # LSODA selects stiff/nonstiff automatically.
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        # Upper/lower Jacobian bandwidths; None means a full Jacobian.
        self.mu = uband
        self.ml = lband

        self.max_order_ns = max_order_ns
        self.max_order_s = max_order_s
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.ixpr = ixpr
        self.max_hnil = max_hnil
        self.success = 1

        self.initialized = False

    def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine lsoda.
        # jt is the Jacobian type indicator (from the branches below):
        #   1: user-supplied full,   2: internally generated full,
        #   4: user-supplied banded, 5: internally generated banded.
        if has_jac:
            if self.mu is None and self.ml is None:
                jt = 1
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 4
        else:
            if self.mu is None and self.ml is None:
                jt = 2
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 5
        # Real workspace must fit the larger of the nonstiff (lrn) and
        # stiff (lrs) requirements, since LSODA may switch between them.
        lrn = 20 + (self.max_order_ns + 4) * n
        if jt in [1, 2]:
            lrs = 22 + (self.max_order_s + 4) * n + n * n
        elif jt in [4, 5]:
            lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
        else:
            raise ValueError('Unexpected jt=%s' % jt)
        lrw = max(lrn, lrs)
        liw = 20 + n
        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), _lsoda_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.ixpr
        iwork[5] = self.nsteps
        iwork[6] = self.max_hnil
        iwork[7] = self.max_order_ns
        iwork[8] = self.max_order_s
        self.iwork = iwork
        # Positional arguments for the runner; the two 1's are the
        # ITASK- and ISTATE-style flags.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, jt]
        self.success = 1
        self.initialized = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # Enforce single-problem usage of the stateful Fortran code.
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()
        args = [f, y0, t0, t1] + self.call_args[:-1] + \
               [jac, self.call_args[-1], f_params, 0, jac_params]
        y1, t, istate = self.runner(*args)
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = f'Unexpected istate={istate:d}'
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)))
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
            self.istate = 2
        return y1, t

    def step(self, *args):
        # Temporarily set the third runner flag to 2 (single step, per
        # the ODEPACK ITASK convention), then restore it.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self, *args):
        # Temporarily set the third runner flag to 3 (stop at the first
        # internal mesh point at or beyond t1), then restore it.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
# Register lsoda only when the compiled Fortran runner is available.
# (Truthiness check rather than `is not None`, as in the original.)
if lsoda.runner:
    IntegratorBase.integrator_classes.append(lsoda)
| 47,921
| 33.903132
| 90
|
py
|
scipy
|
scipy-main/scipy/integrate/dop.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from . import _dop # type: ignore
__all__ = [ # noqa: F822
'dopri5',
'dop853'
]
def __dir__():
    # Limit dir()/tab-completion to the deprecated public names.
    return __all__
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward access to the
    # deprecated public names to the private `_dop` extension module,
    # emitting a DeprecationWarning; reject anything else.
    if name not in __all__:
        raise AttributeError(
            "scipy.integrate.dop is deprecated and has no attribute "
            f"{name}")

    warnings.warn("The `scipy.integrate.dop` namespace is deprecated "
                  "and will be removed in SciPy v2.0.0.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_dop, name)
| 622
| 20.482759
| 76
|
py
|
scipy
|
scipy-main/scipy/integrate/__init__.py
|
"""
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration
quad_vec -- General purpose integration of vector-valued functions
dblquad -- General purpose double integration
tplquad -- General purpose triple integration
nquad -- General purpose N-D integration
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
quadrature -- Integrate with given tolerance using Gaussian quadrature
romberg -- Integrate func using Romberg integration
newton_cotes -- Weights and error coefficient for Newton-Cotes integration
qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature
IntegrationWarning -- Warning on issues during integration
AccuracyWarning -- Warning on issues during quadrature integration
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapezoid -- Use trapezoidal rule to compute integral.
cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral.
simpson -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Solving initial value problems for ODE systems
==============================================
The solvers are implemented as individual classes, which can be used directly
(low-level usage) or through a convenience function.
.. autosummary::
:toctree: generated/
solve_ivp -- Convenient function for ODE integration.
RK23 -- Explicit Runge-Kutta solver of order 3(2).
RK45 -- Explicit Runge-Kutta solver of order 5(4).
DOP853 -- Explicit Runge-Kutta solver of order 8.
Radau -- Implicit Runge-Kutta solver of order 5.
BDF -- Implicit multi-step variable order (1 to 5) solver.
LSODA -- LSODA solver from ODEPACK Fortran package.
OdeSolver -- Base class for ODE solvers.
DenseOutput -- Local interpolant for computing a dense output.
OdeSolution -- Class which represents a continuous ODE solution.
Old API
-------
These are the routines developed earlier for SciPy. They wrap older solvers
implemented in Fortran (mostly ODEPACK). While the interface to them is not
particularly convenient and certain features are missing compared to the new
API, the solvers themselves are of good quality and work fast as compiled
Fortran code. In some cases, it might be worth using this old API.
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
Solving boundary value problems for ODE systems
===============================================
.. autosummary::
:toctree: generated/
solve_bvp -- Solve a boundary value problem for a system of ODEs.
""" # noqa: E501
from ._quadrature import *
from ._odepack_py import *
from ._quadpack_py import *
from ._ode import *
from ._bvp import solve_bvp
from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
from ._quad_vec import quad_vec
# Deprecated namespaces, to be removed in v2.0.0
from . import dop, lsoda, vode, odepack, quadpack
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 4,074
| 36.385321
| 81
|
py
|
scipy
|
scipy-main/scipy/integrate/_quadrature.py
|
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Any, cast
import numpy as np
import math
import warnings
from collections import namedtuple
from scipy.special import roots_legendre
from scipy.special import gammaln, logsumexp
from scipy._lib._util import _rng_spawn
from scipy._lib.deprecation import _NoValue
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
'trapezoid', 'trapz', 'simps', 'simpson',
'cumulative_trapezoid', 'cumtrapz', 'newton_cotes',
'qmc_quad', 'AccuracyWarning']
def trapezoid(y, x=None, dx=1.0, axis=-1):
    r"""
    Integrate along the given axis using the composite trapezoidal rule.

    If `x` is provided, the integration happens in sequence along its
    elements - they are not sorted.  Integrate `y` (`x`) along each 1d
    slice on the given axis, computing :math:`\int y(x) dx`.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapezoid : float or ndarray
        Definite integral of `y` approximated along `axis` by the
        trapezoidal rule. A 1-D `y` yields a float; an n-D `y` yields an
        (n-1)-D array.

    See Also
    --------
    cumulative_trapezoid, simpson, romb

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import integrate
    >>> integrate.trapezoid([1, 2, 3])
    4.0
    >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8])
    8.0
    >>> integrate.trapezoid([1, 2, 3], dx=2)
    8.0
    """
    # NumPy >= 2.0 performed the same rename (trapz -> trapezoid); prefer
    # the new name when present, otherwise fall back to the legacy one.
    impl = getattr(np, 'trapezoid', None)
    if impl is None:
        impl = np.trapz
    return impl(y, x=x, dx=dx, axis=axis)
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
def trapz(y, x=None, dx=1.0, axis=-1):
    """An alias of `trapezoid`.

    `trapz` is kept for backwards compatibility. For new code, prefer
    `trapezoid` instead.
    """
    # Deprecated alias; forward everything to `trapezoid` after warning.
    warnings.warn(
        "'scipy.integrate.trapz' is deprecated in favour of "
        "'scipy.integrate.trapezoid' and will be removed in SciPy 1.14.0",
        DeprecationWarning, stacklevel=2,
    )
    return trapezoid(y, x=x, dx=dx, axis=axis)
class AccuracyWarning(Warning):
    """Warning emitted when an adaptive integration routine exhausts its
    iteration budget before reaching the requested accuracy."""
if TYPE_CHECKING:
# workaround for mypy function attributes see:
# https://github.com/python/mypy/issues/2087#issuecomment-462726600
from typing import Protocol
class CacheAttributes(Protocol):
cache: dict[int, tuple[Any, Any]]
else:
CacheAttributes = Callable
def cache_decorator(func: Callable) -> CacheAttributes:
return cast(CacheAttributes, func)
@cache_decorator
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
    """
    Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature of
    order `n`.

    Parameters
    ----------
    func : callable
        A Python function or method to integrate (must accept vector inputs).
        If integrating a vector-valued function, the returned array must have
        shape ``(..., len(x))``.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function, if any.
    n : int, optional
        Order of quadrature integration. Default is 5.

    Returns
    -------
    val : float
        Gaussian quadrature approximation to the integral
    none : None
        Statically returned value of None

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrators for sampled data
    simpson : integrators for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> f = lambda x: x**8
    >>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
    (0.1110884353741496, None)
    >>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
    (0.11111111111111102, None)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111
    """
    nodes, weights = _cached_roots_legendre(n)
    nodes = np.real(nodes)
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    # Affine map of the reference nodes on [-1, 1] onto [a, b].
    samples = (b-a)*(nodes+1)/2.0 + a
    total = np.sum(weights*func(samples, *args), axis=-1)
    return (b-a)/2.0 * total, None
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    Internal utility used by `romberg` and `quadrature` to create a
    version of `func` that always accepts a vector argument.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if `func` already takes vector arguments.

    Returns
    -------
    vfunc : callable
        A function that will take a vector argument and return the
        result.
    """
    if vec_func:
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        # Scalar input: just forward the call.
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Evaluate the first point separately to discover the output dtype.
        first = func(x[0], *args)
        out = np.empty((len(x),), dtype=getattr(first, 'dtype', type(first)))
        out[0] = first
        for i in range(1, len(x)):
            out[i] = func(x[i], *args)
        return out

    return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature
    with absolute tolerance `tol`.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    See Also
    --------
    fixed_quad : fixed-order Gaussian quadrature
    quad : adaptive quadrature using QUADPACK

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> f = lambda x: x**8
    >>> integrate.quadrature(f, 0.0, 1.0)
    (0.11111111111111106, 4.163336342344337e-17)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111
    """
    if not isinstance(args, tuple):
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = err = np.inf
    # Always allow at least one refinement past `miniter`.
    maxiter = max(miniter+1, maxiter)
    for order in range(miniter, maxiter+1):
        estimate = fixed_quad(vfunc, a, b, (), order)[0]
        err = abs(estimate - val)
        val = estimate
        # Converged when the change between successive orders is small in
        # either an absolute or a relative sense.
        if err < tol or err < rtol*abs(val):
            break
    else:
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning)
    return val, err
def tupleset(t, i, value):
    """Return a tuple copy of sequence ``t`` with item ``i`` set to ``value``.

    ``i`` may be negative, with the usual Python indexing semantics.
    """
    items = list(t)
    items[i] = value
    return tuple(items)
# Note: alias kept for backwards compatibility. Rename was done
# because cumtrapz is a slur in colloquial English (see gh-12924).
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
    """An alias of `cumulative_trapezoid`.

    `cumtrapz` is kept for backwards compatibility. For new code, prefer
    `cumulative_trapezoid` instead.
    """
    # Deprecated alias; forward everything after warning.
    warnings.warn(
        "'scipy.integrate.cumtrapz' is deprecated in favour of "
        "'scipy.integrate.cumulative_trapezoid' and will be removed "
        "in SciPy 1.14.0",
        DeprecationWarning, stacklevel=2,
    )
    return cumulative_trapezoid(y, x=x, dx=dx, axis=axis, initial=initial)
def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : float, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, insert this value at the beginning of the returned result.
        0 or None are the only values accepted. Default is None, which means
        `res` has one element less than `y` along the axis of integration.

        .. deprecated:: 1.12.0
            The option for non-zero inputs for `initial` will be deprecated in
            SciPy 1.14.0. After this time, a ValueError will be raised if
            `initial` is not None or 0.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.linspace(-2, 2, num=20)
    >>> y_int = integrate.cumulative_trapezoid(x, x, initial=0)
    """
    y = np.asarray(y)
    if x is None:
        d = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            d = np.diff(x)
            # Broadcast the 1-D spacings against `y` along `axis`.
            new_shape = [1] * y.ndim
            new_shape[axis] = -1
            d = d.reshape(new_shape)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        else:
            d = np.diff(x, axis=axis)

        if d.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    # Pair each sample with its successor along `axis` and accumulate
    # the trapezoid areas d * (y[i] + y[i+1]) / 2.
    nd = y.ndim
    sel = [slice(None)] * nd
    sel[axis] = slice(1, None)
    upper = tuple(sel)
    sel[axis] = slice(None, -1)
    lower = tuple(sel)
    res = np.cumsum(d * (y[upper] + y[lower]) / 2.0, axis=axis)

    if initial is not None:
        if initial != 0:
            warnings.warn(
                "The option for values for `initial` other than None or 0 is "
                "deprecated as of SciPy 1.12.0 and will raise a value error in"
                " SciPy 1.14.0.",
                DeprecationWarning, stacklevel=2
            )
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")

        pad_shape = list(res.shape)
        pad_shape[axis] = 1
        res = np.concatenate(
            [np.full(pad_shape, initial, dtype=res.dtype), res], axis=axis)

    return res
def _basic_simpson(y, start, stop, x, dx, axis):
    """Composite Simpson's rule over sample triplets along `axis`.

    Sums the parabolic-segment areas for the point triplets
    (start, start+1, start+2), (start+2, start+3, start+4), ...
    up to `stop`.  With `x` given, the irregular-spacing form of
    Simpson's rule is used; otherwise uniform spacing `dx` is assumed.
    """
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    slice_all = (slice(None),)*nd
    # Left, middle, and right points of each parabolic segment.
    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even-spaced Simpson's rule.
        result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis)
        result *= dx / 3.0
    else:
        # Account for possibly different spacings.
        #    Simpson's rule changes a bit.
        h = np.diff(x, axis=axis)
        # First and second spacing of each segment.
        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
        h0 = np.float64(h[sl0])
        h1 = np.float64(h[sl1])
        hsum = h0 + h1
        hprod = h0 * h1
        # Masked divisions (`where=`) return 0 for zero-width intervals
        # instead of raising/propagating inf.
        h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0)
        tmp = hsum/6.0 * (y[slice0] *
                          (2.0 - np.true_divide(1.0, h0divh1,
                                                out=np.zeros_like(h0divh1),
                                                where=h0divh1 != 0)) +
                          y[slice1] * (hsum *
                                       np.true_divide(hsum, hprod,
                                                      out=np.zeros_like(hsum),
                                                      where=hprod != 0)) +
                          y[slice2] * (2.0 - h0divh1))
        result = np.sum(tmp, axis=axis)
    return result
# Note: alias kept for backwards compatibility. simps was renamed to simpson
# because the former is a slur in colloquial English (see gh-12924).
def simps(y, x=None, dx=1.0, axis=-1, even=_NoValue):
    """An alias of `simpson`.

    `simps` is kept for backwards compatibility. For new code, prefer
    `simpson` instead.
    """
    # Deprecated alias; forward everything (including `even`) after warning.
    warnings.warn(
        "'scipy.integrate.simps' is deprecated in favour of "
        "'scipy.integrate.simpson' and will be removed in SciPy 1.14.0",
        DeprecationWarning, stacklevel=2,
    )
    return simpson(y, x=x, dx=dx, axis=axis, even=even)
def simpson(y, x=None, dx=1.0, axis=-1, even=_NoValue):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals. The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : float, optional
        Spacing of integration points along axis of `x`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.
    even : {None, 'simpson', 'avg', 'first', 'last'}, optional
        'avg' : Average two results:
            1) use the first N-2 intervals with a trapezoidal rule on the
            last interval and
            2) use the last N-2 intervals with a trapezoidal rule on the
            first interval.

        'first' : Use Simpson's rule for the first N-2 intervals with
            a trapezoidal rule on the last interval.

        'last' : Use Simpson's rule for the last N-2 intervals with a
            trapezoidal rule on the first interval.

        None : equivalent to 'simpson' (default)

        'simpson' : Use Simpson's rule for the first N-2 intervals with the
            addition of a 3-point parabolic segment for the last
            interval using equations outlined by Cartwright [1]_.
            If the axis to be integrated over only has two points then
            the integration falls back to a trapezoidal integration.

            .. versionadded:: 1.11.0

        .. versionchanged:: 1.11.0
            The newly added 'simpson' option is now the default as it is more
            accurate in most situations.

        .. deprecated:: 1.11.0
            Parameter `even` is deprecated and will be removed in SciPy
            1.14.0. After this time the behaviour for an even number of
            points will follow that of `even='simpson'`.

    Returns
    -------
    float
        The estimated integral computed with the composite Simpson's rule.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    romb : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    References
    ----------
    .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(0, 10)
    >>> y = np.arange(0, 10)

    >>> integrate.simpson(y, x)
    40.5

    >>> y = np.power(x, 3)
    >>> integrate.simpson(y, x)
    1640.5
    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
    1640.25
    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    first_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Reshape 1-D x so it broadcasts against y along `axis`.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    # even keyword parameter is deprecated
    if even is not _NoValue:
        warnings.warn(
            "The 'even' keyword is deprecated as of SciPy 1.11.0 and will be "
            "removed in SciPy 1.14.0",
            DeprecationWarning, stacklevel=2
        )

    if N % 2 == 0:
        val = 0.0
        result = 0.0
        slice_all = (slice(None),) * nd

        # default is 'simpson'
        even = even if even not in (_NoValue, None) else "simpson"

        if even not in ['avg', 'last', 'first', 'simpson']:
            raise ValueError(
                "Parameter 'even' must be 'simpson', "
                "'avg', 'last', or 'first'."
            )

        if N == 2:
            # need at least 3 points in integration axis to form parabolic
            # segment. If there are two points then any of 'avg', 'first',
            # 'last' should give the same result.
            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5 * last_dx * (y[slice1] + y[slice2])

            # calculation is finished. Set `even` to None to skip other
            # scenarios
            even = None

        if even == 'simpson':
            # use Simpson's rule on first intervals
            result = _basic_simpson(y, 0, N-3, x, dx, axis)

            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            slice3 = tupleset(slice_all, axis, -3)

            # FIX: np.asfarray is deprecated and removed in NumPy 2.0;
            # np.asarray with an explicit float64 dtype is the exact
            # equivalent (asfarray defaulted to float64).
            h = np.asarray([dx, dx], dtype=np.float64)
            if x is not None:
                # grab the last two spacings from the appropriate axis
                hm2 = tupleset(slice_all, axis, slice(-2, -1, 1))
                hm1 = tupleset(slice_all, axis, slice(-1, None, 1))

                diffs = np.float64(np.diff(x, axis=axis))
                h = [np.squeeze(diffs[hm2], axis=axis),
                     np.squeeze(diffs[hm1], axis=axis)]

            # This is the correction for the last interval according to
            # Cartwright.
            # However, I used the equations given at
            # https://en.wikipedia.org/wiki/Simpson%27s_rule#Composite_Simpson's_rule_for_irregularly_spaced_data
            # A footnote on Wikipedia says:
            # Cartwright 2017, Equation 8. The equation in Cartwright is
            # calculating the first interval whereas the equations in the
            # Wikipedia article are adjusting for the last integral. If the
            # proper algebraic substitutions are made, the equation results in
            # the values shown.
            # Masked divisions return 0 when an interval has zero width.
            num = 2 * h[1] ** 2 + 3 * h[0] * h[1]
            den = 6 * (h[1] + h[0])
            alpha = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = h[1] ** 2 + 3.0 * h[0] * h[1]
            den = 6 * h[0]
            beta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = 1 * h[1] ** 3
            den = 6 * h[0] * (h[0] + h[1])
            eta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3]

        # The following code (down to result=result+val) can be removed
        # once the 'even' keyword is removed.

        # Compute using Simpson's rule on first intervals
        if even in ['avg', 'first']:
            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5*last_dx*(y[slice1]+y[slice2])
            result = _basic_simpson(y, 0, N-3, x, dx, axis)

        # Compute using Simpson's rule on last set of intervals
        if even in ['avg', 'last']:
            slice1 = tupleset(slice_all, axis, 0)
            slice2 = tupleset(slice_all, axis, 1)
            if x is not None:
                first_dx = x[tuple(slice2)] - x[tuple(slice1)]
            val += 0.5*first_dx*(y[slice2]+y[slice1])
            result += _basic_simpson(y, 1, N-2, x, dx, axis)

        if even == 'avg':
            val /= 2.0
            result /= 2.0
        result = result + val
    else:
        result = _basic_simpson(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    Raises
    ------
    ValueError
        If the number of samples is not one plus a non-negative
        power of 2.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    simpson : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(10, 14.25, 0.25)
    >>> y = np.arange(3, 12)
    >>> integrate.romb(y)
    56.0
    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    # Verify Ninterv is a power of 2 by doubling n until it reaches it.
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    # R[(i, j)] is the Richardson extrapolation table: row i uses 2**i
    # intervals; column j is the j-th extrapolation refinement.
    R = {}
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    # Coarsest estimate: a single trapezoid over the whole interval.
    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in range(1, k+1):
        # Each refinement adds only the new midpoints (stride halves).
        start >>= 1
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        for j in range(1, i+1):
            prev = R[(i, j-1)]
            # Richardson extrapolation: divisor is 4**j - 1.
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may be a (precision, width) sequence; fall back to
            # 5 digits / width 8 when it is just a truthy flag.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print(title, "=" * len(title), sep="\n", end="\n")
            for i in range(k+1):
                for j in range(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * len(title))

    # Bottom-right table entry is the most refined estimate.
    return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to SciPy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
    # Print the Romberg result matrix.
    # NOTE: the trailing "final result" line deliberately reuses the loop
    # variables `i` and `j` after the loops finish (Python leaks them), so
    # it refers to the bottom-right entry of the triangular table.
    i = j = 0
    print('Romberg integration of', repr(function), end=' ')
    print('from', interval)
    print('')
    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
    for i in range(len(resmat)):
        # Row i used 2**i subintervals of width (b - a) / 2**i.
        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
        for j in range(i+1):
            print('%9f' % (resmat[i][j]), end=' ')
        print('')
    print('')
    print('The final result is', resmat[i][j], end=' ')
    # 2**(rows-1) + 1 total sample points were evaluated.
    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed. If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e., whether it is a
        "vector" function). Default is False.

    Raises
    ------
    ValueError
        If `a` or `b` is infinite.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    romb : Integrators for sampled data.

    References
    ----------
    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> import numpy as np
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]
    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701
    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # Running sum of the ordinates; each _difftrap call adds only the new
    # midpoints for the doubled trapezoid count.
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in range(1, divmax+1):
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # New trapezoid estimate, then Richardson-extrapolate against the
        # previous row to fill out the triangular table.
        row = [intrange * ordsum / n]
        for k in range(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            # Keep the whole table only when it is going to be printed.
            resmat.append(row)
        err = abs(result - lastresult)
        # Converged when successive diagonal entries agree within tolerance.
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # Loop exhausted without converging (no `break` taken).
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning)
    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
# Mapping N -> (num_a, den_a, int_a, num_B, den_B); see the derivation in
# the comment block above for how the weights and error coefficient are
# reconstructed from these exact rational values.
_builtincoeffs = {
    1: (1,2,[1,1],-1,12),
    2: (1,3,[1,4,1],-1,90),
    3: (3,8,[1,3,3,1],-3,80),
    4: (2,45,[7,32,12,32,7],-8,945),
    5: (5,288,[19,75,50,50,75,19],-275,12096),
    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
        -2368,467775),
    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                 15741,2857], -4671, 394240),
    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
                   -260550,272400,-48525,106300,16067],
         -673175, 163459296),
    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                      15493566,15493566,-9595542,25226685,-3237113,
                      13486539,2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                      87516288,-87797136,87516288,-51491295,35725120,
                      -7587864,9903168,1364651], -3012, 875875),
    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
                           156074417954,-151659573325,206683437987,
                           -43111992612,-43111992612,206683437987,
                           -151659573325,156074417954,-31268252574,
                           56280729661,8181904909], -2639651053,
                           344881152000),
    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
                         -6625093363,12630121616,-16802270373,19534438464,
                         -16802270373,12630121616,-6625093363,3501442784,
                         -770720657,710986864,90241897], -3740727473,
                        1275983280000)
}
def newton_cotes(rn, equal=0):
    r"""
    Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:
    :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
    + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
    where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.

    Parameters
    ----------
    rn : int
        The integer order for equally-spaced data or the relative positions of
        the samples with the first sample at 0 and the last at N, where N+1 is
        the length of `rn`. N is the order of the Newton-Cotes integration.
    equal : int, optional
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.

    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.

    Examples
    --------
    Compute the integral of sin(x) in [0, :math:`\pi`]:

    >>> from scipy.integrate import newton_cotes
    >>> import numpy as np
    >>> def f(x):
    ...     return np.sin(x)
    >>> a = 0
    >>> b = np.pi
    >>> exact = 2
    >>> for N in [2, 4, 6, 8, 10]:
    ...     x = np.linspace(a, b, N + 1)
    ...     an, B = newton_cotes(N, 1)
    ...     dx = (b - a) / N
    ...     quad = dx * np.sum(an * f(x))
    ...     error = abs(quad - exact)
    ...     print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
    ...
    2 2.094395102 9.43951e-02
    4 1.998570732 1.42927e-03
    6 2.000017814 1.78136e-05
    8 1.999999835 1.64725e-07
    10 2.000000001 1.14677e-09
    """
    try:
        N = len(rn)-1
        if equal:
            rn = np.arange(N+1)
        elif np.all(np.diff(rn) == 1):
            # positions are consecutive integers -> effectively equally spaced
            equal = 1
    except Exception:
        # `rn` has no len(): it is the integer order N itself
        N = rn
        rn = np.arange(N+1)
        equal = 1
    if equal and N in _builtincoeffs:
        # exact rational weights are tabulated for small equally-spaced rules
        na, da, vi, nb, db = _builtincoeffs[N]
        an = na * np.array(vi, dtype=float) / da
        return an, float(nb)/db
    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")
    # normalized positions yi in [0, 1]; ti in [-1, 1]
    yi = rn / float(N)
    ti = 2 * yi - 1
    nvec = np.arange(N+1)
    # Vandermonde matrix of the sample positions in the variable t
    C = ti ** nvec[:, np.newaxis]
    Cinv = np.linalg.inv(C)
    # improve precision of result
    for i in range(2):
        # Newton-Schulz iteration refines the computed inverse
        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
    # exact moments of t^k over [-1, 1] (odd moments vanish)
    vec = 2.0 / (nvec[::2]+1)
    ai = Cinv[:, ::2].dot(vec) * (N / 2.)
    if (N % 2 == 0) and equal:
        # even-order equally-spaced rules gain an extra order of accuracy
        BN = N/(N+3.)
        power = N+2
    else:
        BN = N/(N+2.)
        power = N+1
    BN = BN - np.dot(yi**power, ai)
    p1 = power+1
    # evaluate N**power / power! in log space to avoid overflow for large N
    fac = power*math.log(N) - gammaln(p1)
    fac = math.exp(fac)
    return ai, BN*fac
def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log):
    """Validate and standardize the arguments of `qmc_quad`.

    Returns the tuple ``(vfunc, a, b, n_points, n_estimates, qrng, rng,
    log, stats)`` where `vfunc` is a vectorized wrapper around `func` and
    `stats` is the lazily imported `scipy.stats` module.
    """
    # lazy import to avoid issues with partially-initialized submodule
    # Bug fix: the attribute checked must match the one set below ('stats');
    # the old check for 'qmc' was never true, so the cache was never reused.
    if not hasattr(qmc_quad, 'stats'):
        from scipy import stats
        qmc_quad.stats = stats
    else:
        stats = qmc_quad.stats
    if not callable(func):
        message = "`func` must be callable."
        raise TypeError(message)
    # a, b will be modified, so copy. Oh well if it's copied twice.
    a = np.atleast_1d(a).copy()
    b = np.atleast_1d(b).copy()
    a, b = np.broadcast_arrays(a, b)
    dim = a.shape[0]
    # `func` must at least work on a single point (the centroid)
    try:
        func((a + b) / 2)
    except Exception as e:
        message = ("`func` must evaluate the integrand at points within "
                   "the integration range; e.g. `func( (a + b) / 2)` "
                   "must return the integrand at the centroid of the "
                   "integration volume.")
        raise ValueError(message) from e
    # prefer a vectorized call; fall back to per-point evaluation
    try:
        func(np.array([a, b]).T)
        vfunc = func
    except Exception as e:
        message = ("Exception encountered when attempting vectorized call to "
                   f"`func`: {e}. For better performance, `func` should "
                   "accept two-dimensional array `x` with shape `(len(a), "
                   "n_points)` and return an array of the integrand value at "
                   "each of the `n_points.")
        warnings.warn(message, stacklevel=3)

        def vfunc(x):
            # evaluate `func` one column (point) at a time
            return np.apply_along_axis(func, axis=-1, arr=x)
    n_points_int = np.int64(n_points)
    if n_points != n_points_int:
        message = "`n_points` must be an integer."
        raise TypeError(message)
    n_estimates_int = np.int64(n_estimates)
    if n_estimates != n_estimates_int:
        message = "`n_estimates` must be an integer."
        raise TypeError(message)
    if qrng is None:
        qrng = stats.qmc.Halton(dim)
    elif not isinstance(qrng, stats.qmc.QMCEngine):
        message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
        raise TypeError(message)
    if qrng.d != a.shape[0]:
        message = ("`qrng` must be initialized with dimensionality equal to "
                   "the number of variables in `a`, i.e., "
                   "`qrng.random().shape[-1]` must equal `a.shape[0]`.")
        raise ValueError(message)
    rng_seed = getattr(qrng, 'rng_seed', None)
    rng = stats._qmc.check_random_state(rng_seed)
    if log not in {True, False}:
        message = "`log` must be boolean (`True` or `False`)."
        raise TypeError(message)
    return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats)
# Result container returned by `qmc_quad`.
QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])
def qmc_quad(func, a, b, *, n_estimates=8, n_points=1024, qrng=None,
             log=False):
    """
    Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature.

    Parameters
    ----------
    func : callable
        The integrand. Must accept a single argument ``x``, an array which
        specifies the point(s) at which to evaluate the scalar-valued
        integrand, and return the value(s) of the integrand.
        For efficiency, the function should be vectorized to accept an array of
        shape ``(d, n_points)``, where ``d`` is the number of variables (i.e.
        the dimensionality of the function domain) and `n_points` is the number
        of quadrature points, and return an array of shape ``(n_points,)``,
        the integrand at each quadrature point.
    a, b : array-like
        One-dimensional arrays specifying the lower and upper integration
        limits, respectively, of each of the ``d`` variables.
    n_estimates, n_points : int, optional
        `n_estimates` (default: 8) statistically independent QMC samples, each
        of `n_points` (default: 1024) points, will be generated by `qrng`.
        The total number of points at which the integrand `func` will be
        evaluated is ``n_points * n_estimates``. See Notes for details.
    qrng : `~scipy.stats.qmc.QMCEngine`, optional
        An instance of the QMCEngine from which to sample QMC points.
        The QMCEngine must be initialized to a number of dimensions ``d``
        corresponding with the number of variables ``x1, ..., xd`` passed to
        `func`.
        The provided QMCEngine is used to produce the first integral estimate.
        If `n_estimates` is greater than one, additional QMCEngines are
        spawned from the first (with scrambling enabled, if it is an option.)
        If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton`
        will be initialized with the number of dimensions determine from
        the length of `a`.
    log : boolean, default: False
        When set to True, `func` returns the log of the integrand, and
        the result object contains the log of the integral.

    Returns
    -------
    result : object
        A result object with attributes:

        integral : float
            The estimate of the integral.
        standard_error :
            The error estimate. See Notes for interpretation.

    Notes
    -----
    Values of the integrand at each of the `n_points` points of a QMC sample
    are used to produce an estimate of the integral. This estimate is drawn
    from a population of possible estimates of the integral, the value of
    which we obtain depends on the particular points at which the integral
    was evaluated. We perform this process `n_estimates` times, each time
    evaluating the integrand at different scrambled QMC points, effectively
    drawing i.i.d. random samples from the population of integral estimates.
    The sample mean :math:`m` of these integral estimates is an
    unbiased estimator of the true value of the integral, and the standard
    error of the mean :math:`s` of these estimates may be used to generate
    confidence intervals using the t distribution with ``n_estimates - 1``
    degrees of freedom. Perhaps counter-intuitively, increasing `n_points`
    while keeping the total number of function evaluation points
    ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas
    increasing `n_estimates` tends to decrease the error estimate.

    Examples
    --------
    QMC quadrature is particularly useful for computing integrals in higher
    dimensions. An example integrand is the probability density function
    of a multivariate normal distribution.

    >>> import numpy as np
    >>> from scipy import stats
    >>> dim = 8
    >>> mean = np.zeros(dim)
    >>> cov = np.eye(dim)
    >>> def func(x):
    ...     # `multivariate_normal` expects the _last_ axis to correspond with
    ...     # the dimensionality of the space, so `x` must be transposed
    ...     return stats.multivariate_normal.pdf(x.T, mean, cov)

    To compute the integral over the unit hypercube:

    >>> from scipy.integrate import qmc_quad
    >>> a = np.zeros(dim)
    >>> b = np.ones(dim)
    >>> rng = np.random.default_rng()
    >>> qrng = stats.qmc.Halton(d=dim, seed=rng)
    >>> n_estimates = 8
    >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng)
    >>> res.integral, res.standard_error
    (0.00018429555666024108, 1.0389431116001344e-07)

    A two-sided, 99% confidence interval for the integral may be estimated
    as:

    >>> t = stats.t(df=n_estimates-1, loc=res.integral,
    ...             scale=res.standard_error)
    >>> t.interval(0.99)
    (0.0001839319802536469, 0.00018465913306683527)

    Indeed, the value reported by `scipy.stats.multivariate_normal` is
    within this range.

    >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
    0.00018430867675187443
    """
    args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log)
    func, a, b, n_points, n_estimates, qrng, rng, log, stats = args

    def sum_product(integrands, dA, log=False):
        # Volume-weighted sum of the integrand values; log-space variant
        # uses logsumexp to stay stable.
        if log:
            return logsumexp(integrands) + np.log(dA)
        else:
            return np.sum(integrands * dA)

    def mean(estimates, log=False):
        if log:
            return logsumexp(estimates) - np.log(n_estimates)
        else:
            return np.mean(estimates)

    def std(estimates, m=None, ddof=0, log=False):
        # Bug fix: test against the `None` sentinel rather than truthiness
        # (`m or ...`); an exactly-zero mean is a valid value and must not
        # trigger a recomputation.
        if m is None:
            m = mean(estimates, log)
        if log:
            # log-space standard deviation via complex shift:
            # log(x - m) = logsumexp([log x, log m + i*pi])
            estimates, m = np.broadcast_arrays(estimates, m)
            temp = np.vstack((estimates, m + np.pi * 1j))
            diff = logsumexp(temp, axis=0)
            return np.real(0.5 * (logsumexp(2 * diff)
                                  - np.log(n_estimates - ddof)))
        else:
            return np.std(estimates, ddof=ddof)

    def sem(estimates, m=None, s=None, log=False):
        # Same sentinel fix as in `std` above.
        if m is None:
            m = mean(estimates, log)
        if s is None:
            s = std(estimates, m, ddof=1, log=log)
        if log:
            return s - 0.5*np.log(n_estimates)
        else:
            return s / np.sqrt(n_estimates)

    # The sign of the integral depends on the order of the limits. Fix this by
    # ensuring that lower bounds are indeed lower and setting sign of resulting
    # integral manually
    if np.any(a == b):
        message = ("A lower limit was equal to an upper limit, so the value "
                   "of the integral is zero by definition.")
        warnings.warn(message, stacklevel=2)
        return QMCQuadResult(-np.inf if log else 0, 0)

    i_swap = b < a
    sign = (-1)**(i_swap.sum(axis=-1))  # odd # of swaps -> negative
    a[i_swap], b[i_swap] = b[i_swap], a[i_swap]

    A = np.prod(b - a)
    dA = A / n_points

    estimates = np.zeros(n_estimates)
    rngs = _rng_spawn(qrng.rng, n_estimates)
    for i in range(n_estimates):
        # Generate integral estimate
        sample = qrng.random(n_points)
        # The rationale for transposing is that this allows users to easily
        # unpack `x` into separate variables, if desired. This is consistent
        # with the `xx` array passed into the `scipy.integrate.nquad` `func`.
        x = stats.qmc.scale(sample, a, b).T  # (n_dim, n_points)
        integrands = func(x)
        estimates[i] = sum_product(integrands, dA, log)
        # Get a new, independently-scrambled QRNG for next time
        qrng = type(qrng)(seed=rngs[i], **qrng._init_quad)

    integral = mean(estimates, log)
    standard_error = sem(estimates, m=integral, log=log)
    # In log space a negative integral is encoded as log|I| + i*pi.
    integral = integral + np.pi*1j if (log and sign < 0) else integral*sign
    return QMCQuadResult(integral, standard_error)
| 53,017
| 33.629654
| 113
|
py
|
scipy
|
scipy-main/scipy/integrate/quadpack.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.integrate` namespace for importing the functions
# included below.
import warnings
from . import _quadpack_py
# Names still re-exported (with a deprecation warning) from this shim.
__all__ = [ # noqa: F822
    "quad",
    "dblquad",
    "tplquad",
    "nquad",
    "IntegrationWarning",
    "error",
]
def __dir__():
    # Only the deprecated public names appear in dir()/tab completion.
    return __all__
def __getattr__(name):
    # PEP 562 module-level __getattr__: forward attribute access to the
    # private implementation module, warning users to switch namespaces.
    if name not in __all__:
        raise AttributeError(
            "scipy.integrate.quadpack is deprecated and has no attribute "
            f"{name}. Try looking in scipy.integrate instead.")
    warnings.warn(f"Please use `{name}` from the `scipy.integrate` namespace, "
                  "the `scipy.integrate.quadpack` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_quadpack_py, name)
| 845
| 24.636364
| 79
|
py
|
scipy
|
scipy-main/scipy/integrate/tests/test_bvp.py
|
import sys
# SciPy supports Python 3 only; the Python 2 `StringIO` module fallback
# that used to live here is unnecessary.
from io import StringIO
import numpy as np
from numpy.testing import (assert_, assert_array_equal, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy.sparse import coo_matrix
from scipy.special import erf
from scipy.integrate._bvp import (modify_mesh, estimate_fun_jac,
estimate_bc_jac, compute_jac_indices,
construct_global_jac, solve_bvp)
def exp_fun(x, y):
    """RHS of y'' = y written as the first-order system (y0, y1)' = (y1, y0)."""
    rows = (y[1], y[0])
    return np.vstack(rows)
def exp_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = 1
df_dy[1, 1] = 0
return df_dy
def exp_bc(ya, yb):
    """Boundary residuals enforcing y(0) = 1 and y(1) = 0."""
    residuals = (ya[0] - 1, yb[0])
    return np.hstack(residuals)
def exp_bc_complex(ya, yb):
return np.hstack((ya[0] - 1 - 1j, yb[0]))
def exp_bc_jac(ya, yb):
dbc_dya = np.array([
[1, 0],
[0, 0]
])
dbc_dyb = np.array([
[0, 0],
[1, 0]
])
return dbc_dya, dbc_dyb
def exp_sol(x):
    """Analytic solution of y'' = y with y(0) = 1, y(1) = 0."""
    numerator = np.exp(-x) - np.exp(x - 2)
    denominator = 1 - np.exp(-2)
    return numerator / denominator
def sl_fun(x, y, p):
return np.vstack((y[1], -p[0]**2 * y[0]))
def sl_fun_jac(x, y, p):
n, m = y.shape
df_dy = np.empty((n, 2, m))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = -p[0]**2
df_dy[1, 1] = 0
df_dp = np.empty((n, 1, m))
df_dp[0, 0] = 0
df_dp[1, 0] = -2 * p[0] * y[0]
return df_dy, df_dp
def sl_bc(ya, yb, p):
return np.hstack((ya[0], yb[0], ya[1] - p[0]))
def sl_bc_jac(ya, yb, p):
dbc_dya = np.zeros((3, 2))
dbc_dya[0, 0] = 1
dbc_dya[2, 1] = 1
dbc_dyb = np.zeros((3, 2))
dbc_dyb[1, 0] = 1
dbc_dp = np.zeros((3, 1))
dbc_dp[2, 0] = -1
return dbc_dya, dbc_dyb, dbc_dp
def sl_sol(x, p):
    """Eigenfunction sin(p[0] * x) of the Sturm-Liouville test problem."""
    frequency = p[0]
    return np.sin(frequency * x)
def emden_fun(x, y):
return np.vstack((y[1], -y[0]**5))
def emden_fun_jac(x, y):
df_dy = np.empty((2, 2, x.shape[0]))
df_dy[0, 0] = 0
df_dy[0, 1] = 1
df_dy[1, 0] = -5 * y[0]**4
df_dy[1, 1] = 0
return df_dy
def emden_bc(ya, yb):
return np.array([ya[1], yb[0] - (3/4)**0.5])
def emden_bc_jac(ya, yb):
dbc_dya = np.array([
[0, 1],
[0, 0]
])
dbc_dyb = np.array([
[0, 0],
[1, 0]
])
return dbc_dya, dbc_dyb
def emden_sol(x):
    """Closed-form solution (1 + x**2/3)**(-1/2) of the Emden test equation."""
    base = 1 + x ** 2 / 3
    return base ** (-1 / 2)
def undefined_fun(x, y):
return np.zeros_like(y)
def undefined_bc(ya, yb):
return np.array([ya[0], yb[0] - 1])
def big_fun(x, y):
f = np.zeros_like(y)
f[::2] = y[1::2]
return f
def big_bc(ya, yb):
return np.hstack((ya[::2], yb[::2] - 1))
def big_sol(x, n):
    # Expected solution for `test_big_problem`; the even rows of `y` are `x`.
    # NOTE(review): `y` is assembled but `x` is returned, leaving `y` unused.
    # The caller asserts `assert_allclose(sol_test[0], big_sol(x, n))`, which
    # relies on the 1-D return; returning `y` (shape (2n, x.size)) would
    # change that comparison's broadcasting — confirm intent before "fixing".
    y = np.ones((2 * n, x.size))
    y[::2] = x
    return x
def big_fun_with_parameters(x, y, p):
""" Big version of sl_fun, with two parameters.
The two differential equations represented by sl_fun are broadcast to the
number of rows of y, rotating between the parameters p[0] and p[1].
Here are the differential equations:
dy[0]/dt = y[1]
dy[1]/dt = -p[0]**2 * y[0]
dy[2]/dt = y[3]
dy[3]/dt = -p[1]**2 * y[2]
dy[4]/dt = y[5]
dy[5]/dt = -p[0]**2 * y[4]
dy[6]/dt = y[7]
dy[7]/dt = -p[1]**2 * y[6]
.
.
.
"""
f = np.zeros_like(y)
f[::2] = y[1::2]
f[1::4] = -p[0]**2 * y[::4]
f[3::4] = -p[1]**2 * y[2::4]
return f
def big_fun_with_parameters_jac(x, y, p):
# big version of sl_fun_jac, with two parameters
n, m = y.shape
df_dy = np.zeros((n, n, m))
df_dy[range(0, n, 2), range(1, n, 2)] = 1
df_dy[range(1, n, 4), range(0, n, 4)] = -p[0]**2
df_dy[range(3, n, 4), range(2, n, 4)] = -p[1]**2
df_dp = np.zeros((n, 2, m))
df_dp[range(1, n, 4), 0] = -2 * p[0] * y[range(0, n, 4)]
df_dp[range(3, n, 4), 1] = -2 * p[1] * y[range(2, n, 4)]
return df_dy, df_dp
def big_bc_with_parameters(ya, yb, p):
# big version of sl_bc, with two parameters
return np.hstack((ya[::2], yb[::2], ya[1] - p[0], ya[3] - p[1]))
def big_bc_with_parameters_jac(ya, yb, p):
# big version of sl_bc_jac, with two parameters
n = ya.shape[0]
dbc_dya = np.zeros((n + 2, n))
dbc_dyb = np.zeros((n + 2, n))
dbc_dya[range(n // 2), range(0, n, 2)] = 1
dbc_dyb[range(n // 2, n), range(0, n, 2)] = 1
dbc_dp = np.zeros((n + 2, 2))
dbc_dp[n, 0] = -1
dbc_dya[n, 1] = 1
dbc_dp[n + 1, 1] = -1
dbc_dya[n + 1, 3] = 1
return dbc_dya, dbc_dyb, dbc_dp
def big_sol_with_parameters(x, p):
# big version of sl_sol, with two parameters
return np.vstack((np.sin(p[0] * x), np.sin(p[1] * x)))
def shock_fun(x, y):
eps = 1e-3
return np.vstack((
y[1],
-(x * y[1] + eps * np.pi**2 * np.cos(np.pi * x) +
np.pi * x * np.sin(np.pi * x)) / eps
))
def shock_bc(ya, yb):
    """Boundary residuals for the shock-layer problem: y(-1) = -2, y(1) = 0."""
    left_residual = ya[0] + 2
    right_residual = yb[0]
    return np.array([left_residual, right_residual])
def shock_sol(x):
    """Analytic solution of the shock-layer problem with eps = 1e-3."""
    eps = 1e-3
    layer_width = np.sqrt(2 * eps)
    return np.cos(np.pi * x) + erf(x / layer_width) / erf(1 / layer_width)
def nonlin_bc_fun(x, y):
# laplace eq.
return np.stack([y[1], np.zeros_like(x)])
def nonlin_bc_bc(ya, yb):
phiA, phipA = ya
phiC, phipC = yb
kappa, ioA, ioC, V, f = 1.64, 0.01, 1.0e-4, 0.5, 38.9
# Butler-Volmer Kinetics at Anode
hA = 0.0-phiA-0.0
iA = ioA * (np.exp(f*hA) - np.exp(-f*hA))
res0 = iA + kappa * phipA
# Butler-Volmer Kinetics at Cathode
hC = V - phiC - 1.0
iC = ioC * (np.exp(f*hC) - np.exp(-f*hC))
res1 = iC - kappa*phipC
return np.array([res0, res1])
def nonlin_bc_sol(x):
    """Reference linear solution of the Laplace problem with nonlinear BCs."""
    intercept = -0.13426436116763119
    slope = -1.1308709
    return intercept + slope * x
def test_modify_mesh():
x = np.array([0, 1, 3, 9], dtype=float)
x_new = modify_mesh(x, np.array([0]), np.array([2]))
assert_array_equal(x_new, np.array([0, 0.5, 1, 3, 5, 7, 9]))
x = np.array([-6, -3, 0, 3, 6], dtype=float)
x_new = modify_mesh(x, np.array([1], dtype=int), np.array([0, 2, 3]))
assert_array_equal(x_new, [-6, -5, -4, -3, -1.5, 0, 1, 2, 3, 4, 5, 6])
def test_compute_fun_jac():
x = np.linspace(0, 1, 5)
y = np.empty((2, x.shape[0]))
y[0] = 0.01
y[1] = 0.02
p = np.array([])
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: exp_fun(x, y), x, y, p)
df_dy_an = exp_fun_jac(x, y)
assert_allclose(df_dy, df_dy_an)
assert_(df_dp is None)
x = np.linspace(0, np.pi, 5)
y = np.empty((2, x.shape[0]))
y[0] = np.sin(x)
y[1] = np.cos(x)
p = np.array([1.0])
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
df_dy_an, df_dp_an = sl_fun_jac(x, y, p)
assert_allclose(df_dy, df_dy_an)
assert_allclose(df_dp, df_dp_an)
x = np.linspace(0, 1, 10)
y = np.empty((2, x.shape[0]))
y[0] = (3/4)**0.5
y[1] = 1e-4
p = np.array([])
df_dy, df_dp = estimate_fun_jac(lambda x, y, p: emden_fun(x, y), x, y, p)
df_dy_an = emden_fun_jac(x, y)
assert_allclose(df_dy, df_dy_an)
assert_(df_dp is None)
def test_compute_bc_jac():
ya = np.array([-1.0, 2])
yb = np.array([0.5, 3])
p = np.array([])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
lambda ya, yb, p: exp_bc(ya, yb), ya, yb, p)
dbc_dya_an, dbc_dyb_an = exp_bc_jac(ya, yb)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_(dbc_dp is None)
ya = np.array([0.0, 1])
yb = np.array([0.0, -1])
p = np.array([0.5])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, ya, yb, p)
dbc_dya_an, dbc_dyb_an, dbc_dp_an = sl_bc_jac(ya, yb, p)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_allclose(dbc_dp, dbc_dp_an)
ya = np.array([0.5, 100])
yb = np.array([-1000, 10.5])
p = np.array([])
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(
lambda ya, yb, p: emden_bc(ya, yb), ya, yb, p)
dbc_dya_an, dbc_dyb_an = emden_bc_jac(ya, yb)
assert_allclose(dbc_dya, dbc_dya_an)
assert_allclose(dbc_dyb, dbc_dyb_an)
assert_(dbc_dp is None)
def test_compute_jac_indices():
n = 2
m = 4
k = 2
i, j = compute_jac_indices(n, m, k)
s = coo_matrix((np.ones_like(i), (i, j))).toarray()
s_true = np.array([
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1, 1],
])
assert_array_equal(s, s_true)
def test_compute_global_jac():
n = 2
m = 5
k = 1
i_jac, j_jac = compute_jac_indices(2, 5, 1)
x = np.linspace(0, 1, 5)
h = np.diff(x)
y = np.vstack((np.sin(np.pi * x), np.pi * np.cos(np.pi * x)))
p = np.array([3.0])
f = sl_fun(x, y, p)
x_middle = x[:-1] + 0.5 * h
y_middle = 0.5 * (y[:, :-1] + y[:, 1:]) - h/8 * (f[:, 1:] - f[:, :-1])
df_dy, df_dp = sl_fun_jac(x, y, p)
df_dy_middle, df_dp_middle = sl_fun_jac(x_middle, y_middle, p)
dbc_dya, dbc_dyb, dbc_dp = sl_bc_jac(y[:, 0], y[:, -1], p)
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
J = J.toarray()
def J_block(h, p):
return np.array([
[h**2*p**2/12 - 1, -0.5*h, -h**2*p**2/12 + 1, -0.5*h],
[0.5*h*p**2, h**2*p**2/12 - 1, 0.5*h*p**2, 1 - h**2*p**2/12]
])
J_true = np.zeros((m * n + k, m * n + k))
for i in range(m - 1):
J_true[i * n: (i + 1) * n, i * n: (i + 2) * n] = J_block(h[i], p[0])
J_true[:(m - 1) * n:2, -1] = p * h**2/6 * (y[0, :-1] - y[0, 1:])
J_true[1:(m - 1) * n:2, -1] = p * (h * (y[0, :-1] + y[0, 1:]) +
h**2/6 * (y[1, :-1] - y[1, 1:]))
J_true[8, 0] = 1
J_true[9, 8] = 1
J_true[10, 1] = 1
J_true[10, 10] = -1
assert_allclose(J, J_true, rtol=1e-10)
df_dy, df_dp = estimate_fun_jac(sl_fun, x, y, p)
df_dy_middle, df_dp_middle = estimate_fun_jac(sl_fun, x_middle, y_middle, p)
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(sl_bc, y[:, 0], y[:, -1], p)
J = construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle,
df_dp, df_dp_middle, dbc_dya, dbc_dyb, dbc_dp)
J = J.toarray()
assert_allclose(J, J_true, rtol=2e-8, atol=2e-8)
def test_parameter_validation():
x = [0, 1, 0.5]
y = np.zeros((2, 3))
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
x = np.linspace(0, 1, 5)
y = np.zeros((2, 4))
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y)
def fun(x, y, p):
return exp_fun(x, y)
def bc(ya, yb, p):
return exp_bc(ya, yb)
y = np.zeros((2, x.shape[0]))
assert_raises(ValueError, solve_bvp, fun, bc, x, y, p=[1])
def wrong_shape_fun(x, y):
return np.zeros(3)
assert_raises(ValueError, solve_bvp, wrong_shape_fun, bc, x, y)
S = np.array([[0, 0]])
assert_raises(ValueError, solve_bvp, exp_fun, exp_bc, x, y, S=S)
def test_no_params():
x = np.linspace(0, 1, 5)
x_test = np.linspace(0, 1, 100)
y = np.zeros((2, x.shape[0]))
for fun_jac in [None, exp_fun_jac]:
for bc_jac in [None, exp_bc_jac]:
sol = solve_bvp(exp_fun, exp_bc, x, y, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_equal(sol.x.size, 5)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], exp_sol(x_test), atol=1e-5)
f_test = exp_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res**2, axis=0)**0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_with_params():
x = np.linspace(0, np.pi, 5)
x_test = np.linspace(0, np.pi, 100)
y = np.ones((2, x.shape[0]))
for fun_jac in [None, sl_fun_jac]:
for bc_jac in [None, sl_bc_jac]:
sol = solve_bvp(sl_fun, sl_bc, x, y, p=[0.5], fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_(sol.x.size < 10)
assert_allclose(sol.p, [1], rtol=1e-4)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], sl_sol(x_test, [1]),
rtol=1e-4, atol=1e-4)
f_test = sl_fun(x_test, sol_test, [1])
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_singular_term():
x = np.linspace(0, 1, 10)
x_test = np.linspace(0.05, 1, 100)
y = np.empty((2, 10))
y[0] = (3/4)**0.5
y[1] = 1e-4
S = np.array([[0, 0], [0, -2]])
for fun_jac in [None, emden_fun_jac]:
for bc_jac in [None, emden_bc_jac]:
sol = solve_bvp(emden_fun, emden_bc, x, y, S=S, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_equal(sol.x.size, 10)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], emden_sol(x_test), atol=1e-5)
f_test = emden_fun(x_test, sol_test) + S.dot(sol_test) / x_test
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_complex():
# The test is essentially the same as test_no_params, but boundary
# conditions are turned into complex.
x = np.linspace(0, 1, 5)
x_test = np.linspace(0, 1, 100)
y = np.zeros((2, x.shape[0]), dtype=complex)
for fun_jac in [None, exp_fun_jac]:
for bc_jac in [None, exp_bc_jac]:
sol = solve_bvp(exp_fun, exp_bc_complex, x, y, fun_jac=fun_jac,
bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0].real, exp_sol(x_test), atol=1e-5)
assert_allclose(sol_test[0].imag, exp_sol(x_test), atol=1e-5)
f_test = exp_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(np.real(rel_res * np.conj(rel_res)),
axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_failures():
x = np.linspace(0, 1, 2)
y = np.zeros((2, x.size))
res = solve_bvp(exp_fun, exp_bc, x, y, tol=1e-5, max_nodes=5)
assert_equal(res.status, 1)
assert_(not res.success)
x = np.linspace(0, 1, 5)
y = np.zeros((2, x.size))
res = solve_bvp(undefined_fun, undefined_bc, x, y)
assert_equal(res.status, 2)
assert_(not res.success)
def test_big_problem():
n = 30
x = np.linspace(0, 1, 5)
y = np.zeros((2 * n, x.size))
sol = solve_bvp(big_fun, big_bc, x, y)
assert_equal(sol.status, 0)
assert_(sol.success)
sol_test = sol.sol(x)
assert_allclose(sol_test[0], big_sol(x, n))
f_test = big_fun(x, sol_test)
r = sol.sol(x, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(np.real(rel_res * np.conj(rel_res)), axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_big_problem_with_parameters():
n = 30
x = np.linspace(0, np.pi, 5)
x_test = np.linspace(0, np.pi, 100)
y = np.ones((2 * n, x.size))
for fun_jac in [None, big_fun_with_parameters_jac]:
for bc_jac in [None, big_bc_with_parameters_jac]:
sol = solve_bvp(big_fun_with_parameters, big_bc_with_parameters, x,
y, p=[0.5, 0.5], fun_jac=fun_jac, bc_jac=bc_jac)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_allclose(sol.p, [1, 1], rtol=1e-4)
sol_test = sol.sol(x_test)
for isol in range(0, n, 4):
assert_allclose(sol_test[isol],
big_sol_with_parameters(x_test, [1, 1])[0],
rtol=1e-4, atol=1e-4)
assert_allclose(sol_test[isol + 2],
big_sol_with_parameters(x_test, [1, 1])[1],
rtol=1e-4, atol=1e-4)
f_test = big_fun_with_parameters(x_test, sol_test, [1, 1])
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_(np.all(sol.rms_residuals < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_shock_layer():
x = np.linspace(-1, 1, 5)
x_test = np.linspace(-1, 1, 100)
y = np.zeros((2, x.size))
sol = solve_bvp(shock_fun, shock_bc, x, y)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_(sol.x.size < 110)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], shock_sol(x_test), rtol=1e-5, atol=1e-5)
f_test = shock_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_nonlin_bc():
x = np.linspace(0, 0.1, 5)
x_test = x
y = np.zeros([2, x.size])
sol = solve_bvp(nonlin_bc_fun, nonlin_bc_bc, x, y)
assert_equal(sol.status, 0)
assert_(sol.success)
assert_(sol.x.size < 8)
sol_test = sol.sol(x_test)
assert_allclose(sol_test[0], nonlin_bc_sol(x_test), rtol=1e-5, atol=1e-5)
f_test = nonlin_bc_fun(x_test, sol_test)
r = sol.sol(x_test, 1) - f_test
rel_res = r / (1 + np.abs(f_test))
norm_res = np.sum(rel_res ** 2, axis=0) ** 0.5
assert_(np.all(norm_res < 1e-3))
assert_allclose(sol.sol(sol.x), sol.y, rtol=1e-10, atol=1e-10)
assert_allclose(sol.sol(sol.x, 1), sol.yp, rtol=1e-10, atol=1e-10)
def test_verbose():
# Smoke test that checks the printing does something and does not crash
x = np.linspace(0, 1, 5)
y = np.zeros((2, x.shape[0]))
for verbose in [0, 1, 2]:
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
sol = solve_bvp(exp_fun, exp_bc, x, y, verbose=verbose)
text = sys.stdout.getvalue()
finally:
sys.stdout = old_stdout
assert_(sol.success)
if verbose == 0:
assert_(not text, text)
if verbose >= 1:
assert_("Solved in" in text, text)
if verbose >= 2:
assert_("Max residual" in text, text)
| 20,181
| 27.345506
| 80
|
py
|
scipy
|
scipy-main/scipy/integrate/tests/test__quad_vec.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.integrate import quad_vec
from multiprocessing.dummy import Pool
quadrature_params = pytest.mark.parametrize(
'quadrature', [None, "gk15", "gk21", "trapezoid"])
@quadrature_params
def test_quad_vec_simple(quadrature):
n = np.arange(10)
def f(x):
return x ** n
for epsabs in [0.1, 1e-3, 1e-6]:
if quadrature == 'trapezoid' and epsabs < 1e-4:
# slow: skip
continue
kwargs = dict(epsabs=epsabs, quadrature=quadrature)
exact = 2**(n+1)/(n + 1)
res, err = quad_vec(f, 0, 2, norm='max', **kwargs)
assert_allclose(res, exact, rtol=0, atol=epsabs)
res, err = quad_vec(f, 0, 2, norm='2', **kwargs)
assert np.linalg.norm(res - exact) < epsabs
res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs)
assert_allclose(res, exact, rtol=0, atol=epsabs)
res, err, *rest = quad_vec(f, 0, 2, norm='max',
epsrel=1e-8,
full_output=True,
limit=10000,
**kwargs)
assert_allclose(res, exact, rtol=0, atol=epsabs)
@quadrature_params
def test_quad_vec_simple_inf(quadrature):
def f(x):
return 1 / (1 + np.float64(x) ** 2)
for epsabs in [0.1, 1e-3, 1e-6]:
if quadrature == 'trapezoid' and epsabs < 1e-4:
# slow: skip
continue
kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)
res, err = quad_vec(f, 0, np.inf, **kwargs)
assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, 0, -np.inf, **kwargs)
assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, -np.inf, 0, **kwargs)
assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, np.inf, 0, **kwargs)
assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, -np.inf, np.inf, **kwargs)
assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, np.inf, -np.inf, **kwargs)
assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, np.inf, np.inf, **kwargs)
assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, -np.inf, -np.inf, **kwargs)
assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))
res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))
def f(x):
return np.sin(x + 2) / (1 + x ** 2)
exact = np.pi / np.e * np.sin(2)
epsabs = 1e-5
res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs,
quadrature=quadrature, full_output=True)
assert info.status == 1
assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err))
def test_quad_vec_args():
    # Extra positional arguments must be forwarded to the integrand.
    def integrand(x, a):
        return x * (x + a) * np.arange(3)

    shift = 2
    # integral over [0, 1] of k * x * (x + 2) for k = 0, 1, 2
    expected = np.array([0, 4/3, 8/3])
    res, err = quad_vec(integrand, 0, 1, args=(shift,))
    assert_allclose(res, expected, rtol=0, atol=1e-4)
def _lorenzian(x):
return 1 / (1 + x**2)
def test_quad_vec_pool():
    """Parallel evaluation: `workers` given as a process count and as a
    map-like callable must both reproduce the serial result (pi)."""
    # Module-level function so it can be sent to worker processes.
    f = _lorenzian
    res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4)
    assert_allclose(res, np.pi, rtol=0, atol=1e-4)
    # `Pool` comes from the import block above (presumably
    # multiprocessing(.dummy).Pool — a local closure works as the
    # integrand here, which would not pickle under plain multiprocessing;
    # verify against the file header).
    with Pool(10) as pool:
        def f(x):
            return 1 / (1 + x ** 2)
        res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map)
        assert_allclose(res, np.pi, rtol=0, atol=1e-4)
def _func_with_args(x, a):
return x * (x + a) * np.arange(3)
@pytest.mark.parametrize('extra_args', [2, (2,)])
@pytest.mark.parametrize('workers', [1, 10])
def test_quad_vec_pool_args(extra_args, workers):
    """`args` must be forwarded correctly whether given as a bare scalar
    or a tuple, both with integer `workers` and with a map callable."""
    f = _func_with_args
    exact = np.array([0, 4/3, 8/3])
    res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers)
    assert_allclose(res, exact, rtol=0, atol=1e-4)
    # Same computation through a Pool's map (Pool from the import block).
    with Pool(workers) as pool:
        res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map)
        assert_allclose(res, exact, rtol=0, atol=1e-4)
@quadrature_params
def test_num_eval(quadrature):
    """`info.neval` must equal the true number of integrand evaluations."""
    def f(x):
        # Count every call so the reported neval can be cross-checked.
        count[0] += 1
        return x**5
    count = [0]
    res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature)
    # res == (value, error, info); compare info.neval to our own counter.
    assert res[2].neval == count[0]
def test_info():
    """With full_output=True the info object must be fully populated and
    its array shapes must be mutually consistent."""
    def constant_field(x):
        return np.ones((3, 2, 1))
    _, _, info = quad_vec(constant_field, 0, 1, norm='max', full_output=True)
    # Convergence is reported via both the flag and the status code.
    assert info.success is True
    assert info.status == 0
    assert info.message == 'Target precision reached.'
    assert info.neval > 0
    # Each subinterval row is a (start, end) pair, with one integral
    # (of the integrand's shape) and one error estimate per row.
    assert info.intervals.shape[1] == 2
    n_intervals = info.intervals.shape[0]
    assert info.integrals.shape == (n_intervals, 3, 2, 1)
    assert info.errors.shape == (n_intervals,)
def test_nan_inf():
    """Non-finite integrand values must be reported via status == 3."""
    def always_nan(x):
        return np.nan
    def diverging(x):
        return np.inf if x < 0.1 else 1/x
    for bad_integrand in (always_nan, diverging):
        _, _, info = quad_vec(bad_integrand, 0, 1, full_output=True)
        assert info.status == 3
@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0),
                                 (-np.inf, np.inf), (np.inf, -np.inf)])
def test_points(a, b):
    # Check that initial interval splitting is done according to
    # `points`, by checking that consecutive sets of 15 point (for
    # gk15) function evaluations lie between `points`
    points = (0, 0.25, 0.5, 0.75, 1.0)
    points += tuple(-x for x in points)
    # Each gk15 rule application evaluates the integrand exactly 15 times.
    quadrature_points = 15
    interval_sets = []
    count = 0
    def f(x):
        nonlocal count
        # Start a new bucket at the beginning of each 15-point rule, then
        # record every abscissa so buckets can be checked afterwards.
        if count % quadrature_points == 0:
            interval_sets.append(set())
        count += 1
        interval_sets[-1].add(float(x))
        return 0.0
    # limit=0 forbids adaptive subdivision beyond the initial `points` split.
    quad_vec(f, a, b, points=points, quadrature='gk15', limit=0)
    # Check that all point sets lie in a single `points` interval
    for p in interval_sets:
        j = np.searchsorted(sorted(points), tuple(p))
        assert np.all(j == j[0])
| 6,286
| 28.938095
| 90
|
py
|
scipy
|
scipy-main/scipy/integrate/tests/test_quadpack.py
|
import sys
import math
import numpy as np
from numpy import sqrt, cos, sin, arctan, exp, log, pi
from numpy.testing import (assert_,
assert_allclose, assert_array_less, assert_almost_equal)
import pytest
from scipy.integrate import quad, dblquad, tplquad, nquad
from scipy.special import erf, erfc
from scipy._lib._ccallback import LowLevelCallable
import ctypes
import ctypes.util
from scipy._lib._ccallback_c import sine_ctypes
import scipy.integrate._test_multivariate as clib_test
def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8):
    """Check a quadrature routine's (value, error) result.

    The value must match `tabled_value` within the routine's own reported
    error, and — unless `error_tolerance` is None — the reported error
    itself must be below `error_tolerance`.
    """
    estimate, reported_err = value_and_err
    assert_allclose(estimate, tabled_value, rtol=0, atol=reported_err)
    if error_tolerance is None:
        return
    assert_array_less(reported_err, error_tolerance)
def get_clib_test_routine(name, restype, *argtypes):
    """Fetch the named routine from the compiled test-helper extension
    and wrap its raw pointer in a ctypes prototype with the given
    return/argument types."""
    raw_ptr = getattr(clib_test, name)
    prototype = ctypes.CFUNCTYPE(restype, *argtypes)
    return ctypes.cast(raw_ptr, prototype)
class TestCtypesQuad:
    """Integrate ctypes-wrapped libm routines with `quad`, and check how
    LowLevelCallable handles the supported/unsupported C signatures."""
    def setup_method(self):
        # Locate the platform's C math library so its sin/cos/tan can be
        # called through ctypes.
        if sys.platform == 'win32':
            files = ['api-ms-win-crt-math-l1-1-0.dll']
        elif sys.platform == 'darwin':
            files = ['libm.dylib']
        else:
            files = ['libm.so', 'libm.so.6']
        for file in files:
            try:
                self.lib = ctypes.CDLL(file)
                break
            except OSError:
                pass
        else:
            # This test doesn't work on some Linux platforms (Fedora for
            # example) that put an ld script in libm.so - see gh-5370
            pytest.skip("Ctypes can't import libm.so")
        # Declare double(double) prototypes for the libm functions used.
        restype = ctypes.c_double
        argtypes = (ctypes.c_double,)
        for name in ['sin', 'cos', 'tan']:
            func = getattr(self.lib, name)
            func.restype = restype
            func.argtypes = argtypes
    def test_typical(self):
        # ctypes libm results must match the pure-Python math equivalents.
        assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
        assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
        assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
    def test_ctypes_sine(self):
        # Smoke test: quad accepts a LowLevelCallable built from a ctypes
        # function pointer (result not checked here).
        quad(LowLevelCallable(sine_ctypes), 0, 1)
    def test_ctypes_variants(self):
        # Sine callbacks compiled with every C signature variant the
        # LowLevelCallable machinery recognizes (and one it should not).
        sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
                                      ctypes.c_double, ctypes.c_void_p)
        sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double),
                                      ctypes.c_void_p)
        sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
                                      ctypes.c_double)
        sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
                                      ctypes.c_int, ctypes.POINTER(ctypes.c_double))
        # NOTE(review): sin_4 deliberately re-declares '_sin_3' with a
        # mismatched (int, double) prototype — presumably to exercise the
        # invalid-signature error path; confirm against the C helper.
        sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
                                      ctypes.c_int, ctypes.c_double)
        all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
        legacy_sigs = [sin_2, sin_4]
        legacy_only_sigs = [sin_4]
        # LowLevelCallables work for new signatures
        for j, func in enumerate(all_sigs):
            callback = LowLevelCallable(func)
            if func in legacy_only_sigs:
                pytest.raises(ValueError, quad, callback, 0, pi)
            else:
                assert_allclose(quad(callback, 0, pi)[0], 2.0)
        # Plain ctypes items work only for legacy signatures
        for j, func in enumerate(legacy_sigs):
            if func in legacy_sigs:
                assert_allclose(quad(func, 0, pi)[0], 2.0)
            else:
                pytest.raises(ValueError, quad, func, 0, pi)
class TestMultivariateCtypesQuad:
    """`quad` with multivariate C integrands loaded through ctypes from
    the compiled test-helper extension."""
    def setup_method(self):
        # All helper routines share the (int, double) -> double prototype.
        restype = ctypes.c_double
        argtypes = (ctypes.c_int, ctypes.c_double)
        for name in ['_multivariate_typical', '_multivariate_indefinite',
                     '_multivariate_sin']:
            func = get_clib_test_routine(name, restype, *argtypes)
            setattr(self, name, func)
    def test_typical(self):
        # 1) Typical function with two extra arguments:
        assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
                    0.30614353532540296487)
    def test_indefinite(self):
        # 2) Infinite integration limits --- Euler's constant
        assert_quad(quad(self._multivariate_indefinite, 0, np.inf),
                    0.577215664901532860606512)
    def test_threadsafety(self):
        # Ensure multivariate ctypes are threadsafe by nesting a quad call
        # inside the integrand of an outer quad call.
        def threadsafety(y):
            return y + quad(self._multivariate_sin, 0, 1)[0]
        assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
class TestQuad:
    """Tests for scipy.integrate.quad and the multiple-integral wrappers
    dblquad/tplquad: weighted rules, (semi-)infinite limits, reversed
    limits, and complex-valued integrands."""
    def test_typical(self):
        # 1) Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
    def test_indefinite(self):
        # 2) Infinite integration limits --- Euler's constant
        def myfunc(x):  # Euler's constant integrand
            return -exp(-x)*log(x)
        assert_quad(quad(myfunc, 0, np.inf), 0.577215664901532860606512)
    def test_singular(self):
        # 3) Singular points in region of integration.
        def myfunc(x):
            if 0 < x < 2.5:
                return sin(x)
            elif 2.5 <= x <= 5.0:
                return exp(-x)
            else:
                return 0.0
        # `points` tells quad where the integrand switches branches.
        assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
                    1 - cos(2.5) + exp(-2.5) - exp(-5.0))
    def test_sine_weighted_finite(self):
        # 4) Sine weighted integral (finite limits)
        def myfunc(x, a):
            return exp(a*(x-1))
        ome = 2.0**3.4
        assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
                    (20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
    def test_sine_weighted_infinite(self):
        # 5) Sine weighted integral (infinite limits)
        def myfunc(x, a):
            return exp(-x*a)
        a = 4.0
        ome = 3.0
        assert_quad(quad(myfunc, 0, np.inf, args=a, weight='sin', wvar=ome),
                    ome/(a**2 + ome**2))
    def test_cosine_weighted_infinite(self):
        # 6) Cosine weighted integral (negative infinite limits)
        def myfunc(x, a):
            return exp(x*a)
        a = 2.5
        ome = 2.3
        assert_quad(quad(myfunc, -np.inf, 0, args=a, weight='cos', wvar=ome),
                    a/(a**2 + ome**2))
    def test_algebraic_log_weight(self):
        # 6) Algebraic-logarithmic weight.
        def myfunc(x, a):
            return 1/(1+x+2**(-a))
        a = 1.5
        assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
                         wvar=(-0.5, -0.5)),
                    pi/sqrt((1+2**(-a))**2 - 1))
    def test_cauchypv_weight(self):
        # 7) Cauchy principal value weighting w(x) = 1/(x-c)
        def myfunc(x, a):
            return 2.0**(-a)/((x-1)**2+4.0**(-a))
        a = 0.4
        tabledValue = ((2.0**(-0.4)*log(1.5) -
                        2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
                        arctan(2.0**(a+2)) -
                        arctan(2.0**a)) /
                       (4.0**(-a) + 1))
        assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
                    tabledValue, error_tolerance=1.9e-8)
    def test_b_less_than_a(self):
        # Reversing the integration limits must negate the result.
        def f(x, p, q):
            return p * np.exp(-q*x)
        val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
        val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
    def test_b_less_than_a_2(self):
        # Same check with both limits infinite.
        def f(x, s):
            return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
        val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
        val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
    def test_b_less_than_a_3(self):
        # Same check with a weighted rule.
        def f(x):
            return 1.0
        val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
        val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
        assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
    def test_b_less_than_a_full_output(self):
        # Same check again with full_output=True.
        def f(x):
            return 1.0
        res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
        res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
        err = max(res_1[1], res_2[1])
        assert_allclose(res_1[0], -res_2[0], atol=err)
    def test_double_integral(self):
        # 8) Double Integral test
        def simpfunc(y, x):  # Note order of arguments.
            return x+y
        a, b = 1.0, 2.0
        assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
                    5/6.0 * (b**3.0-a**3.0))
    def test_double_integral2(self):
        # Double integral with callable inner limits and extra args.
        def func(x0, x1, t0, t1):
            return x0 + x1 + t0 + t1
        def g(x):
            return x
        def h(x):
            return 2 * x
        args = 1, 2
        assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
    def test_double_integral3(self):
        # Double integral with constant (non-callable) inner limits.
        def func(x0, x1):
            return x0 + x1 + 1 + 2
        assert_quad(dblquad(func, 1, 2, 1, 2),6.)
    @pytest.mark.parametrize(
        "x_lower, x_upper, y_lower, y_upper, expected",
        [
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-inf, 0] for all n.
            (-np.inf, 0, -np.inf, 0, np.pi / 4),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-inf, -1] for each n (one at a time).
            (-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)),
            (-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-inf, -1] for all n.
            (-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-inf, 1] for each n (one at a time).
            (-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)),
            (-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-inf, 1] for all n.
            (-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain Dx = [-inf, -1] and Dy = [-inf, 1].
            (-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain Dx = [-inf, 1] and Dy = [-inf, -1].
            (-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [0, inf] for all n.
            (0, np.inf, 0, np.inf, np.pi / 4),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [1, inf] for each n (one at a time).
            (1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)),
            (0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [1, inf] for all n.
            (1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-1, inf] for each n (one at a time).
            (-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)),
            (0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-1, inf] for all n.
            (-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain Dx = [-1, inf] and Dy = [1, inf].
            (-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain Dx = [1, inf] and Dy = [-1, inf].
            (1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
            # Multiple integration of a function in n = 2 variables: f(x, y, z)
            # over domain D = [-inf, inf] for all n.
            (-np.inf, np.inf, -np.inf, np.inf, np.pi)
        ]
    )
    def test_double_integral_improper(
            self, x_lower, x_upper, y_lower, y_upper, expected
    ):
        # The Gaussian Integral.
        def f(x, y):
            return np.exp(-x ** 2 - y ** 2)
        assert_quad(
            dblquad(f, x_lower, x_upper, y_lower, y_upper),
            expected,
            error_tolerance=3e-8
        )
    def test_triple_integral(self):
        # 9) Triple Integral test
        def simpfunc(z, y, x, t):  # Note order of arguments.
            return (x+y+z)*t
        a, b = 1.0, 2.0
        assert_quad(tplquad(simpfunc, a, b,
                            lambda x: x, lambda x: 2*x,
                            lambda x, y: x - y, lambda x, y: x + y,
                            (2.,)),
                    2*8/3.0 * (b**4.0 - a**4.0))
    @pytest.mark.parametrize(
        "x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected",
        [
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, 0] for all n.
            (-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, -1] for each n (one at a time).
            (-np.inf, -1, -np.inf, 0, -np.inf, 0,
             (np.pi ** (3 / 2)) / 8 * erfc(1)),
            (-np.inf, 0, -np.inf, -1, -np.inf, 0,
             (np.pi ** (3 / 2)) / 8 * erfc(1)),
            (-np.inf, 0, -np.inf, 0, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * erfc(1)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, -1] for each n (two at a time).
            (-np.inf, -1, -np.inf, -1, -np.inf, 0,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
            (-np.inf, -1, -np.inf, 0, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
            (-np.inf, 0, -np.inf, -1, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, -1] for all n.
            (-np.inf, -1, -np.inf, -1, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1].
            (-np.inf, -1, -np.inf, 1, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1].
            (-np.inf, -1, -np.inf, -1, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1].
            (-np.inf, -1, -np.inf, 1, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1].
            (-np.inf, 1, -np.inf, -1, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1].
            (-np.inf, 1, -np.inf, 1, -np.inf, -1,
             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1].
            (-np.inf, 1, -np.inf, -1, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, 1] for each n (one at a time).
            (-np.inf, 1, -np.inf, 0, -np.inf, 0,
             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
            (-np.inf, 0, -np.inf, 1, -np.inf, 0,
             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
            (-np.inf, 0, -np.inf, 0, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, 1] for each n (two at a time).
            (-np.inf, 1, -np.inf, 1, -np.inf, 0,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
            (-np.inf, 1, -np.inf, 0, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
            (-np.inf, 0, -np.inf, 1, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, 1] for all n.
            (-np.inf, 1, -np.inf, 1, -np.inf, 1,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [0, inf] for all n.
            (0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [1, inf] for each n (one at a time).
            (1, np.inf, 0, np.inf, 0, np.inf,
             (np.pi ** (3 / 2)) / 8 * erfc(1)),
            (0, np.inf, 1, np.inf, 0, np.inf,
             (np.pi ** (3 / 2)) / 8 * erfc(1)),
            (0, np.inf, 0, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * erfc(1)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [1, inf] for each n (two at a time).
            (1, np.inf, 1, np.inf, 0, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
            (1, np.inf, 0, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
            (0, np.inf, 1, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [1, inf] for all n.
            (1, np.inf, 1, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-1, inf] for each n (one at a time).
            (-1, np.inf, 0, np.inf, 0, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
            (0, np.inf, -1, np.inf, 0, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
            (0, np.inf, 0, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-1, inf] for each n (two at a time).
            (-1, np.inf, -1, np.inf, 0, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
            (-1, np.inf, 0, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
            (0, np.inf, -1, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-1, inf] for all n.
            (-1, np.inf, -1, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = [1, inf] and Dy = Dz = [-1, inf].
            (1, np.inf, -1, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dy = [1, inf] and Dz = [-1, inf].
            (1, np.inf, 1, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dz = [1, inf] and Dy = [-1, inf].
            (1, np.inf, -1, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = [-1, inf] and Dy = Dz = [1, inf].
            (-1, np.inf, 1, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dy = [-1, inf] and Dz = [1, inf].
            (-1, np.inf, -1, np.inf, 1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain Dx = Dz = [-1, inf] and Dy = [1, inf].
            (-1, np.inf, 1, np.inf, -1, np.inf,
             (np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
            # Multiple integration of a function in n = 3 variables: f(x, y, z)
            # over domain D = [-inf, inf] for all n.
            (-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf,
             np.pi ** (3 / 2)),
        ],
    )
    def test_triple_integral_improper(
            self,
            x_lower,
            x_upper,
            y_lower,
            y_upper,
            z_lower,
            z_upper,
            expected
    ):
        # The Gaussian Integral.
        def f(x, y, z):
            return np.exp(-x ** 2 - y ** 2 - z ** 2)
        assert_quad(
            tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper),
            expected,
            error_tolerance=6e-8
        )
    def test_complex(self):
        def tfunc(x):
            return np.exp(1j*x)
        assert np.allclose(
                    quad(tfunc, 0, np.pi/2, complex_func=True)[0],
                    1+1j)
        # We consider a divergent case in order to force quadpack
        # to return an error message. The output is compared
        # against what is returned by explicit integration
        # of the parts.
        kwargs = {'a': 0, 'b': np.inf, 'full_output': True,
                  'weight': 'cos', 'wvar': 1}
        res_c = quad(tfunc, complex_func=True, **kwargs)
        res_r = quad(lambda x: np.real(np.exp(1j*x)),
                     complex_func=False,
                     **kwargs)
        res_i = quad(lambda x: np.imag(np.exp(1j*x)),
                     complex_func=False,
                     **kwargs)
        np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0])
        np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1])
        # The complex result's full_output dict must carry the real and
        # imaginary parts' extra outputs (info dict, message, explanation).
        assert len(res_c[2]['real']) == len(res_r[2:]) == 3
        assert res_c[2]['real'][2] == res_r[4]
        assert res_c[2]['real'][1] == res_r[3]
        assert res_c[2]['real'][0]['lst'] == res_r[2]['lst']
        assert len(res_c[2]['imag']) == len(res_i[2:]) == 1
        assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst']
class TestNQuad:
    """Tests for scipy.integrate.nquad: fixed/variable limits, per-level
    options, and agreement with quad/dblquad/tplquad."""
    def test_fixed_limits(self):
        # Integrand with a step discontinuity; the per-level `points`
        # option pinpoints the break for the innermost integration.
        def func1(x0, x1, x2, x3):
            val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
                   (1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
            return val
        def opts_basic(*args):
            return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
        res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
                    opts=[opts_basic, {}, {}, {}], full_output=True)
        assert_quad(res[:-1], 1.5267454070738635)
        # Sanity-bound the evaluation count reported in full_output.
        assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
    def test_variable_limits(self):
        scale = .1
        def func2(x0, x1, x2, x3, t0, t1):
            val = (x0*x1*x3**2 + np.sin(x2) + 1 +
                   (1 if x0 + t1*x1 - t0 > 0 else 0))
            return val
        # Each limit function receives the remaining (outer) variables
        # plus the extra args (t0, t1).
        def lim0(x1, x2, x3, t0, t1):
            return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
                    scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
        def lim1(x2, x3, t0, t1):
            return [scale * (t0*x2 + t1*x3) - 1,
                    scale * (t0*x2 + t1*x3) + 1]
        def lim2(x3, t0, t1):
            return [scale * (x3 + t0**2*t1**3) - 1,
                    scale * (x3 + t0**2*t1**3) + 1]
        def lim3(t0, t1):
            return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
        def opts0(x1, x2, x3, t0, t1):
            return {'points': [t0 - t1*x1]}
        def opts1(x2, x3, t0, t1):
            return {}
        def opts2(x3, t0, t1):
            return {}
        def opts3(t0, t1):
            return {}
        res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
                    opts=[opts0, opts1, opts2, opts3])
        assert_quad(res, 25.066666666666663)
    def test_square_separate_ranges_and_opts(self):
        # Unit-square area with distinct range/opts objects per level.
        def f(y, x):
            return 1.0
        assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
    def test_square_aliased_ranges_and_opts(self):
        # Same, but sharing one range list and one opts dict (aliasing
        # must not confuse nquad).
        def f(y, x):
            return 1.0
        r = [-1, 1]
        opt = {}
        assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
    def test_square_separate_fn_ranges_and_opts(self):
        # Ranges and opts supplied as distinct callables.
        def f(y, x):
            return 1.0
        def fn_range0(*args):
            return (-1, 1)
        def fn_range1(*args):
            return (-1, 1)
        def fn_opt0(*args):
            return {}
        def fn_opt1(*args):
            return {}
        ranges = [fn_range0, fn_range1]
        opts = [fn_opt0, fn_opt1]
        assert_quad(nquad(f, ranges, opts=opts), 4.0)
    def test_square_aliased_fn_ranges_and_opts(self):
        # Ranges and opts supplied as one aliased callable per kind.
        def f(y, x):
            return 1.0
        def fn_range(*args):
            return (-1, 1)
        def fn_opt(*args):
            return {}
        ranges = [fn_range, fn_range]
        opts = [fn_opt, fn_opt]
        assert_quad(nquad(f, ranges, opts=opts), 4.0)
    def test_matching_quad(self):
        # 1-D nquad must agree with quad (value and error estimate).
        def func(x):
            return x**2 + 1
        res, reserr = quad(func, 0, 4)
        res2, reserr2 = nquad(func, ranges=[[0, 4]])
        assert_almost_equal(res, res2)
        assert_almost_equal(reserr, reserr2)
    def test_matching_dblquad(self):
        # 2-D nquad must agree with dblquad.
        def func2d(x0, x1):
            return x0**2 + x1**3 - x0 * x1 + 1
        res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
        res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
        assert_almost_equal(res, res2)
        assert_almost_equal(reserr, reserr2)
    def test_matching_tplquad(self):
        # 3-D nquad must agree with tplquad (note reversed range order:
        # nquad lists the innermost variable first).
        def func3d(x0, x1, x2, c0, c1):
            return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
        res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
                      lambda x, y: -np.pi, lambda x, y: np.pi,
                      args=(2, 3))
        res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
        assert_almost_equal(res, res2)
    def test_dict_as_opts(self):
        # A single opts dict (not a list of dicts) must be accepted
        # without raising TypeError.
        try:
            nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
        except TypeError:
            assert False
| 27,983
| 40.274336
| 84
|
py
|
scipy
|
scipy-main/scipy/integrate/tests/test_quadrature.py
|
import pytest
import numpy as np
from numpy import cos, sin, pi
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, suppress_warnings)
from scipy.integrate import (quadrature, romberg, romb, newton_cotes,
cumulative_trapezoid, cumtrapz, trapz, trapezoid,
quad, simpson, simps, fixed_quad, AccuracyWarning,
qmc_quad)
from scipy import stats, special as sc
class TestFixedQuad:
    """Tests for fixed-order Gaussian quadrature (`fixed_quad`)."""

    def test_scalar(self):
        """n-point Gaussian quadrature is exact for degree 2n-1 monomials."""
        order = 4
        degree = 2 * order - 1
        got, _ = fixed_quad(lambda x: x**degree, 0, 1, n=order)
        # integral_0^1 x**(2n-1) dx = 1/(2n)
        assert_allclose(got, 1 / (degree + 1), rtol=1e-12)

    def test_vector(self):
        """Vector-valued integrand: all monomials up to degree 2n-1 at once."""
        order = 4
        powers = np.arange(1, 2 * order)
        got, _ = fixed_quad(lambda x: x**powers[:, None], 0, 1, n=order)
        assert_allclose(got, 1 / (powers + 1), rtol=1e-12)
class TestQuadrature:
    def quad(self, x, a, b, args):
        # Placeholder hook; the tests call the specific integrators
        # directly instead of overriding this.
        raise NotImplementedError

    def test_quadrature(self):
        """Adaptive Gaussian quadrature of a Bessel-function integrand
        against a tabulated value."""
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        val, err = quadrature(myfunc, 0, pi, (2, 1.8))
        table_val = 0.30614353532540296487
        assert_almost_equal(val, table_val, decimal=7)

    def test_quadrature_rtol(self):
        """Relative tolerance must be honored for a huge-magnitude integrand."""
        def myfunc(x, n, z):  # Bessel function integrand
            return 1e90 * cos(n*x-z*sin(x))/pi
        val, err = quadrature(myfunc, 0, pi, (2, 1.8), rtol=1e-10)
        table_val = 1e90 * 0.30614353532540296487
        assert_allclose(val, table_val, rtol=1e-10)

    def test_quadrature_miniter(self):
        """The `miniter` argument must not change the converged result."""
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        table_val = 0.30614353532540296487
        for miniter in [5, 52]:
            val, err = quadrature(myfunc, 0, pi, (2, 1.8), miniter=miniter)
            assert_almost_equal(val, table_val, decimal=7)
            assert_(err < 1.0)

    def test_quadrature_single_args(self):
        """A bare scalar must be accepted for `args` (auto-wrapped)."""
        def myfunc(x, n):
            return 1e90 * cos(n*x-1.8*sin(x))/pi
        val, err = quadrature(myfunc, 0, pi, args=2, rtol=1e-10)
        table_val = 1e90 * 0.30614353532540296487
        assert_allclose(val, table_val, rtol=1e-10)

    def test_romberg(self):
        """Romberg integration of the Bessel integrand vs tabulated value."""
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return cos(n*x-z*sin(x))/pi
        val = romberg(myfunc, 0, pi, args=(2, 1.8))
        table_val = 0.30614353532540296487
        assert_almost_equal(val, table_val, decimal=7)

    def test_romberg_rtol(self):
        """Romberg must honor rtol for a huge-magnitude integrand."""
        # Typical function with two extra arguments:
        def myfunc(x, n, z):  # Bessel function integrand
            return 1e19*cos(n*x-z*sin(x))/pi
        val = romberg(myfunc, 0, pi, args=(2, 1.8), rtol=1e-10)
        table_val = 1e19*0.30614353532540296487
        assert_allclose(val, table_val, rtol=1e-10)
def test_romb(self):
assert_equal(romb(np.arange(17)), 128)
    def test_romb_gh_3731(self):
        # Check that romb makes maximal use of data points
        x = np.arange(2**4+1)
        y = np.cos(0.2*x)
        val = romb(y)
        # Reference value from adaptive quad over the same interval.
        val2, err = quad(lambda x: np.cos(0.2*x), x.min(), x.max())
        assert_allclose(val, val2, rtol=1e-8, atol=0)
        # should be equal to romb with 2**k+1 samples
        with suppress_warnings() as sup:
            sup.filter(AccuracyWarning, "divmax .4. exceeded")
            val3 = romberg(lambda x: np.cos(0.2*x), x.min(), x.max(), divmax=4)
        assert_allclose(val, val3, rtol=1e-12, atol=0)

    def test_non_dtype(self):
        # Check that we work fine with functions returning float
        import math
        valmath = romberg(math.sin, 0, 1)
        expected_val = 0.45969769413185085
        assert_almost_equal(valmath, expected_val, decimal=7)
def test_newton_cotes(self):
"""Test the first few degrees, for evenly spaced points."""
n = 1
wts, errcoff = newton_cotes(n, 1)
assert_equal(wts, n*np.array([0.5, 0.5]))
assert_almost_equal(errcoff, -n**3/12.0)
n = 2
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 4.0, 1.0])/6.0)
assert_almost_equal(errcoff, -n**5/2880.0)
n = 3
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([1.0, 3.0, 3.0, 1.0])/8.0)
assert_almost_equal(errcoff, -n**5/6480.0)
n = 4
wts, errcoff = newton_cotes(n, 1)
assert_almost_equal(wts, n*np.array([7.0, 32.0, 12.0, 32.0, 7.0])/90.0)
assert_almost_equal(errcoff, -n**7/1935360.0)
def test_newton_cotes2(self):
"""Test newton_cotes with points that are not evenly spaced."""
x = np.array([0.0, 1.5, 2.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 8.0/3
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
x = np.array([0.0, 1.4, 2.1, 3.0])
y = x**2
wts, errcoff = newton_cotes(x)
exact_integral = 9.0
numeric_integral = np.dot(wts, y)
assert_almost_equal(numeric_integral, exact_integral)
    # ignore the DeprecationWarning emitted by the even kwd
    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    def test_simpson(self):
        """Simpson's rule: even/odd sample counts, the deprecated `even`
        handling modes, multi-axis inputs, and degenerate bases."""
        y = np.arange(17)
        assert_equal(simpson(y), 128)
        assert_equal(simpson(y, dx=0.5), 64)
        assert_equal(simpson(y, x=np.linspace(0, 4, 17)), 32)
        # Even number of samples: the three legacy `even` modes differ.
        y = np.arange(4)
        x = 2**y
        assert_equal(simpson(y, x=x, even='avg'), 13.875)
        assert_equal(simpson(y, x=x, even='first'), 13.75)
        assert_equal(simpson(y, x=x, even='last'), 14)
        # `even='simpson'`
        # integral should be exactly 21
        x = np.linspace(1, 4, 4)
        def f(x):
            return x**2
        assert_allclose(simpson(f(x), x=x, even='simpson'), 21.0)
        assert_allclose(simpson(f(x), x=x, even='avg'), 21 + 1/6)
        # integral should be exactly 114
        x = np.linspace(1, 7, 4)
        assert_allclose(simpson(f(x), dx=2.0, even='simpson'), 114)
        assert_allclose(simpson(f(x), dx=2.0, even='avg'), 115 + 1/3)
        # `even='simpson'`, test multi-axis behaviour
        a = np.arange(16).reshape(4, 4)
        x = np.arange(64.).reshape(4, 4, 4)
        y = f(x)
        for i in range(3):
            r = simpson(y, x=x, even='simpson', axis=i)
            it = np.nditer(a, flags=['multi_index'])
            for _ in it:
                idx = list(it.multi_index)
                idx.insert(i, slice(None))
                # Exact antiderivative of x**2 along the chosen axis.
                integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3
                assert_allclose(r[it.multi_index], integral)
        # test when integration axis only has two points
        x = np.arange(16).reshape(8, 2)
        y = f(x)
        for even in ['simpson', 'avg', 'first', 'last']:
            r = simpson(y, x=x, even=even, axis=-1)
            # With only two samples every mode reduces to the trapezoid rule.
            integral = 0.5 * (y[:, 1] + y[:, 0]) * (x[:, 1] - x[:, 0])
            assert_allclose(r, integral)
        # odd points, test multi-axis behaviour
        a = np.arange(25).reshape(5, 5)
        x = np.arange(125).reshape(5, 5, 5)
        y = f(x)
        for i in range(3):
            r = simpson(y, x=x, axis=i)
            it = np.nditer(a, flags=['multi_index'])
            for _ in it:
                idx = list(it.multi_index)
                idx.insert(i, slice(None))
                integral = x[tuple(idx)][-1]**3 / 3 - x[tuple(idx)][0]**3 / 3
                assert_allclose(r[it.multi_index], integral)
        # Tests for checking base case
        x = np.array([3])
        y = np.power(x, 2)
        assert_allclose(simpson(y, x=x, axis=0), 0.0)
        assert_allclose(simpson(y, x=x, axis=-1), 0.0)
        # Constant x (zero-width intervals) must integrate to zero.
        x = np.array([3, 3, 3, 3])
        y = np.power(x, 2)
        assert_allclose(simpson(y, x=x, axis=0), 0.0)
        assert_allclose(simpson(y, x=x, axis=-1), 0.0)
        x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 2, 4, 8]])
        y = np.power(x, 2)
        zero_axis = [0.0, 0.0, 0.0, 0.0]
        default_axis = [170 + 1/3] * 3  # 8**3 / 3 - 1/3
        assert_allclose(simpson(y, x=x, axis=0), zero_axis)
        # the following should be exact for even='simpson'
        assert_allclose(simpson(y, x=x, axis=-1), default_axis)
        x = np.array([[1, 2, 4, 8], [1, 2, 4, 8], [1, 8, 16, 32]])
        y = np.power(x, 2)
        zero_axis = [0.0, 136.0, 1088.0, 8704.0]
        default_axis = [170 + 1/3, 170 + 1/3, 32**3 / 3 - 1/3]
        assert_allclose(simpson(y, x=x, axis=0), zero_axis)
        assert_allclose(simpson(y, x=x, axis=-1), default_axis)

    def test_simpson_even_is_deprecated(self):
        """Passing the `even` keyword must emit a DeprecationWarning."""
        x = np.linspace(0, 3, 4)
        y = x**2
        with pytest.deprecated_call():
            simpson(y, x=x, even='first')
@pytest.mark.parametrize('droplast', [False, True])
def test_simpson_2d_integer_no_x(self, droplast):
    """2-D integer input must integrate identically to its float copy.

    `droplast` exercises both the even and odd point-count code paths.
    """
    data = np.array([[2, 2, 4, 4, 8, 8, -4, 5],
                     [4, 4, 2, -4, 10, 22, -2, 10]])
    if droplast:
        data = data[:, :-1]
    got = simpson(data, axis=-1)
    want = simpson(data.astype(np.float64), axis=-1)
    assert_equal(got, want)
def test_simps(self):
    """The deprecated `simps` alias warns and agrees with `simpson`."""
    samples = np.arange(5)
    abscissa = 2 ** samples
    with pytest.deprecated_call(match="simpson"):
        assert_allclose(
            simpson(samples, x=abscissa, dx=0.5),
            simps(samples, x=abscissa, dx=0.5)
        )
class TestCumulative_trapezoid:
    """Basic checks for `cumulative_trapezoid` and its deprecated aliases."""

    def test_1d(self):
        """Cumulative integral of y = x over [-2, 2], with/without initial."""
        grid = np.linspace(-2, 2, num=5)
        expected = [0., -1.5, -2., -1.5, 0.]
        assert_allclose(cumulative_trapezoid(grid, grid, initial=0), expected)
        # Without `initial` the leading element is dropped.
        assert_allclose(cumulative_trapezoid(grid, grid, initial=None),
                        expected[1:])

    def test_y_nd_x_nd(self):
        """n-d samples with matching n-d abscissae, along every axis."""
        coords = np.arange(3 * 2 * 4).reshape(3, 2, 4)
        expected = np.array([[[0., 0.5, 2., 4.5],
                              [0., 4.5, 10., 16.5]],
                             [[0., 8.5, 18., 28.5],
                              [0., 12.5, 26., 40.5]],
                             [[0., 16.5, 34., 52.5],
                              [0., 20.5, 42., 64.5]]])
        assert_allclose(cumulative_trapezoid(coords, coords, initial=0),
                        expected)

        # With initial=None the integration axis loses one element.
        trimmed_shapes = [(2, 2, 4), (3, 1, 4), (3, 2, 3)]
        for axis, trimmed in enumerate(trimmed_shapes):
            padded = cumulative_trapezoid(coords, coords, initial=0, axis=axis)
            assert_equal(padded.shape, (3, 2, 4))
            bare = cumulative_trapezoid(coords, coords, initial=None, axis=axis)
            assert_equal(bare.shape, trimmed)

    def test_y_nd_x_1d(self):
        """n-d samples with a 1-d abscissa, along every axis."""
        samples = np.arange(3 * 2 * 4).reshape(3, 2, 4)
        abscissa = np.arange(4)**2
        expected_by_axis = (
            np.array([[[4., 5., 6., 7.],
                       [8., 9., 10., 11.]],
                      [[40., 44., 48., 52.],
                       [56., 60., 64., 68.]]]),
            np.array([[[2., 3., 4., 5.]],
                      [[10., 11., 12., 13.]],
                      [[18., 19., 20., 21.]]]),
            np.array([[[0.5, 5., 17.5],
                       [4.5, 21., 53.5]],
                      [[8.5, 37., 89.5],
                       [12.5, 53., 125.5]],
                      [[16.5, 69., 161.5],
                       [20.5, 85., 197.5]]]),
        )
        for axis, expected in enumerate(expected_by_axis):
            result = cumulative_trapezoid(samples,
                                          x=abscissa[:samples.shape[axis]],
                                          axis=axis, initial=None)
            assert_allclose(result, expected)

    def test_x_none(self):
        """With x omitted, unit (or dx-scaled) spacing is assumed."""
        vals = np.linspace(-2, 2, num=5)

        assert_allclose(cumulative_trapezoid(vals), [-1.5, -2., -1.5, 0.])
        assert_allclose(cumulative_trapezoid(vals, initial=0),
                        [0, -1.5, -2., -1.5, 0.])
        assert_allclose(cumulative_trapezoid(vals, dx=3),
                        [-4.5, -6., -4.5, 0.])
        assert_allclose(cumulative_trapezoid(vals, dx=3, initial=0),
                        [0, -4.5, -6., -4.5, 0.])

    @pytest.mark.parametrize(
        "initial", [1, 0.5]
    )
    def test_initial_warning(self, initial):
        """A non-None, non-zero `initial` is deprecated; the value is
        prepended to the cumulative result."""
        vals = np.linspace(0, 10, num=10)
        with pytest.deprecated_call(match="`initial`"):
            result = cumulative_trapezoid(vals, initial=initial)
        assert_allclose(result, [initial, *np.cumsum(vals[1:] + vals[:-1])/2])

    def test_cumtrapz(self):
        """The deprecated `cumtrapz` alias warns and matches the new name."""
        coords = np.arange(3 * 2 * 4).reshape(3, 2, 4)
        with pytest.deprecated_call(match="cumulative_trapezoid"):
            assert_allclose(
                cumulative_trapezoid(coords, coords, dx=0.5, axis=0, initial=0),
                cumtrapz(coords, coords, dx=0.5, axis=0, initial=0),
                rtol=1e-14)
class TestTrapezoid:
"""This function is tested in NumPy more extensive, just do some
basic due diligence here."""
def test_trapezoid(self):
y = np.arange(17)
assert_equal(trapezoid(y), 128)
assert_equal(trapezoid(y, dx=0.5), 64)
assert_equal(trapezoid(y, x=np.linspace(0, 4, 17)), 32)
y = np.arange(4)
x = 2**y
assert_equal(trapezoid(y, x=x, dx=0.1), 13.5)
def test_trapz(self):
# Basic coverage test for the alias
y = np.arange(4)
x = 2**y
with pytest.deprecated_call(match="trapezoid"):
assert_equal(trapezoid(y, x=x, dx=0.5, axis=0),
trapz(y, x=x, dx=0.5, axis=0))
class TestQMCQuad:
    # Tests for scipy.integrate.qmc_quad (quasi-Monte Carlo quadrature).
    # NOTE: restructuring these tests is risky -- the assertions depend on
    # the exact order in which the seeded RNG / Sobol sequence is consumed.

    def test_input_validation(self):
        # Each invalid argument must raise (or warn) with a specific message.
        message = "`func` must be callable."
        with pytest.raises(TypeError, match=message):
            qmc_quad("a duck", [0, 0], [1, 1])

        message = "`func` must evaluate the integrand at points..."
        with pytest.raises(ValueError, match=message):
            qmc_quad(lambda: 1, [0, 0], [1, 1])

        def func(x):
            assert x.ndim == 1
            return np.sum(x)

        # An integrand that only accepts 1-D points triggers the
        # non-vectorized fallback path, which warns.
        message = "Exception encountered when attempting vectorized call..."
        with pytest.warns(UserWarning, match=message):
            qmc_quad(func, [0, 0], [1, 1])

        message = "`n_points` must be an integer."
        with pytest.raises(TypeError, match=message):
            qmc_quad(lambda x: 1, [0, 0], [1, 1], n_points=1024.5)

        message = "`n_estimates` must be an integer."
        with pytest.raises(TypeError, match=message):
            qmc_quad(lambda x: 1, [0, 0], [1, 1], n_estimates=8.5)

        message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
        with pytest.raises(TypeError, match=message):
            qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng="a duck")

        message = "`qrng` must be initialized with dimensionality equal to "
        with pytest.raises(ValueError, match=message):
            qmc_quad(lambda x: 1, [0, 0], [1, 1], qrng=stats.qmc.Sobol(1))

        message = r"`log` must be boolean \(`True` or `False`\)."
        with pytest.raises(TypeError, match=message):
            qmc_quad(lambda x: 1, [0, 0], [1, 1], log=10)

    def basic_test(self, n_points=2**8, n_estimates=8, signs=np.ones(2)):
        # Integrate a 2-D standard-normal PDF over a box and compare with
        # the analytic CDF value within a 99% confidence interval.
        # NOTE(review): `signs=np.ones(2)` is a mutable (array) default
        # shared across calls; safe here because it is never mutated.
        ndim = 2
        mean = np.zeros(ndim)
        cov = np.eye(ndim)

        def func(x):
            return stats.multivariate_normal.pdf(x.T, mean, cov)

        rng = np.random.default_rng(2879434385674690281)
        qrng = stats.qmc.Sobol(ndim, seed=rng)
        a = np.zeros(ndim)
        b = np.ones(ndim) * signs
        res = qmc_quad(func, a, b, n_points=n_points,
                       n_estimates=n_estimates, qrng=qrng)
        ref = stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
        atol = sc.stdtrit(n_estimates-1, 0.995) * res.standard_error  # 99% CI
        assert_allclose(res.integral, ref, atol=atol)
        # The integral's sign follows the orientation of the limits.
        assert np.prod(signs)*res.integral > 0

        # Repeat in log space with an identically seeded qrng; the log
        # result must match the linear result (imaginary pi encodes the
        # sign of a negative integral in log space).
        rng = np.random.default_rng(2879434385674690281)
        qrng = stats.qmc.Sobol(ndim, seed=rng)
        logres = qmc_quad(lambda *args: np.log(func(*args)), a, b,
                          n_points=n_points, n_estimates=n_estimates,
                          log=True, qrng=qrng)
        assert_allclose(np.exp(logres.integral), res.integral, rtol=1e-14)
        assert np.imag(logres.integral) == (np.pi if np.prod(signs) < 0 else 0)
        assert_allclose(np.exp(logres.standard_error),
                        res.standard_error, rtol=1e-14, atol=1e-16)

    @pytest.mark.parametrize("n_points", [2**8, 2**12])
    @pytest.mark.parametrize("n_estimates", [8, 16])
    def test_basic(self, n_points, n_estimates):
        self.basic_test(n_points, n_estimates)

    @pytest.mark.parametrize("signs", [[1, 1], [-1, -1], [-1, 1], [1, -1]])
    def test_sign(self, signs):
        # Reversed limits flip the sign of the integral.
        self.basic_test(signs=signs)

    @pytest.mark.parametrize("log", [False, True])
    def test_zero(self, log):
        # A zero-volume region integrates to 0 (or -inf in log space).
        message = "A lower limit was equal to an upper limit, so"
        with pytest.warns(UserWarning, match=message):
            res = qmc_quad(lambda x: 1, [0, 0], [0, 1], log=log)
        assert res.integral == (-np.inf if log else 0)
        assert res.standard_error == 0

    def test_flexible_input(self):
        # check that qrng is not required
        # also checks that for 1d problems, a and b can be scalars
        def func(x):
            return stats.norm.pdf(x, scale=2)

        res = qmc_quad(func, 0, 1)
        ref = stats.norm.cdf(1, scale=2) - stats.norm.cdf(0, scale=2)
        assert_allclose(res.integral, ref, 1e-2)
| 18,274
| 37.636364
| 82
|
py
|
scipy
|
scipy-main/scipy/integrate/tests/test_odeint_jac.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy.integrate import odeint
import scipy.integrate._test_odeint_banded as banded5x5
def rhs(y, t):
    """Right-hand side of the banded5x5 system, evaluated by the
    Fortran test extension (which fills the output array in place)."""
    deriv = np.zeros_like(y)
    banded5x5.banded5x5(t, y, deriv)
    return deriv
def jac(y, t):
    """Full (dense) Jacobian of the banded5x5 system.

    Fortran-ordered storage is required by the extension routine.
    """
    size = len(y)
    full = np.zeros((size, size), order='F')
    banded5x5.banded5x5_jac(t, y, 1, 1, full)
    return full
def bjac(y, t):
    """Banded Jacobian of the banded5x5 system (4 diagonals stored,
    Fortran order, as expected by the extension routine)."""
    size = len(y)
    banded = np.zeros((4, size), order='F')
    banded5x5.banded5x5_bjac(t, y, 1, 1, banded)
    return banded
# Jacobian-type codes forwarded to the Fortran driver.
# NOTE(review): presumably these mirror the ODEPACK/LSODA `jt` parameter
# (1 = user-supplied full Jacobian, 4 = user-supplied banded) -- confirm
# against the ODEPACK documentation.
JACTYPE_FULL = 1
JACTYPE_BANDED = 4
def check_odeint(jactype):
    """Integrate the banded5x5 system with `odeint` and with the pure
    Fortran driver, then check that the final states and the solver
    statistics (steps, RHS evaluations, Jacobian evaluations) agree.

    Raises ValueError for an unrecognized `jactype`.
    """
    if jactype == JACTYPE_FULL:
        lower_bw, upper_bw, dfun = None, None, jac
    elif jactype == JACTYPE_BANDED:
        lower_bw, upper_bw, dfun = 2, 1, bjac
    else:
        raise ValueError(f"invalid jactype: {jactype!r}")

    start = np.arange(1.0, 6.0)
    # These tolerances must match the tolerances used in banded5x5.f.
    rtol = 1e-11
    atol = 1e-13
    step = 0.125
    nsteps = 64
    times = step * np.arange(nsteps + 1)

    sol, info = odeint(rhs, start, times,
                       Dfun=dfun, ml=lower_bw, mu=upper_bw,
                       atol=atol, rtol=rtol, full_output=True)
    py_final = sol[-1]
    py_stats = (info['nst'][-1], info['nfe'][-1], info['nje'][-1])

    fortran_final = start.copy()
    # Pure Fortran solution; the array is updated in place.
    nst, nfe, nje = banded5x5.banded5x5_solve(fortran_final, nsteps, step,
                                              jactype)

    # The two results are likely bit-for-bit identical, but compare with
    # a tight tolerance to be cautious.
    assert_allclose(py_final, fortran_final, rtol=1e-12)
    assert_equal(py_stats, (nst, nfe, nje))
def test_odeint_full_jac():
    # Exercise odeint with a dense user-supplied Jacobian.
    check_odeint(jactype=JACTYPE_FULL)
def test_odeint_banded_jac():
    # Exercise odeint with a banded user-supplied Jacobian.
    check_odeint(jactype=JACTYPE_BANDED)
| 1,816
| 23.226667
| 71
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.