repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | ratio | autogenerated | config_or_test | has_no_keywords | has_few_assignments
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
scikit-learn/scikit-learn | sklearn/linear_model/_glm/tests/test_glm.py | 4 | 41485 | # Authors: Christian Lorentzen <lorentzen.ch@gmail.com>
#
# License: BSD 3 clause
from functools import partial
import itertools
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy
from scipy import linalg
from scipy.optimize import minimize, root
from sklearn.base import clone
from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss
from sklearn._loss.glm_distribution import TweedieDistribution
from sklearn._loss.link import IdentityLink, LogLink
from sklearn.datasets import make_low_rank_matrix, make_regression
from sklearn.linear_model import (
GammaRegressor,
PoissonRegressor,
Ridge,
TweedieRegressor,
)
from sklearn.linear_model._glm import _GeneralizedLinearRegressor
from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance
from sklearn.model_selection import train_test_split
SOLVERS = ["lbfgs", "newton-cholesky"]
class BinomialRegressor(_GeneralizedLinearRegressor):
def _get_loss(self):
return HalfBinomialLoss()
def _special_minimize(fun, grad, x, tol_NM, tol):
# Find good starting point by Nelder-Mead
res_NM = minimize(
fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM}
)
# Now refine via root finding on the gradient of the function, which is
# more precise than minimizing the function itself.
res = root(
grad,
res_NM.x,
method="lm",
options={"ftol": tol, "xtol": tol, "gtol": tol},
)
return res.x
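# For illustration: on a strictly convex toy problem with a known optimum, e.g.
# fun(w) = 0.5 * ||w - w_true||^2 and grad(w) = w - w_true, the Nelder-Mead step
# gets close to w_true and the subsequent root finding on the gradient sharpens
# the result to near machine precision.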
@pytest.fixture(scope="module")
def regression_data():
X, y = make_regression(
n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2
)
return X, y
@pytest.fixture(
params=itertools.product(
["long", "wide"],
[
BinomialRegressor(),
PoissonRegressor(),
GammaRegressor(),
# TweedieRegressor(power=3.0), # too difficult
# TweedieRegressor(power=0, link="log"), # too difficult
TweedieRegressor(power=1.5),
],
),
ids=lambda param: f"{param[0]}-{param[1]}",
)
def glm_dataset(global_random_seed, request):
"""Dataset with GLM solutions, well conditioned X.
This is inspired by ols_ridge_dataset in test_ridge.py.
The construction is based on the SVD decomposition of X = U S V'.
Parameters
----------
type : {"long", "wide"}
If "long", then n_samples > n_features.
If "wide", then n_features > n_samples.
model : a GLM model
For "wide", we return the minimum norm solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
Note that the deviance is always minimized if y = inverse_link(X w) is possible to
achieve, which it is in the wide data case. Therefore, we can construct the
solution with minimum norm like (wide) OLS:
min ||w||_2 subject to link(y) = raw_prediction = X w
Returns
-------
model : GLM model
X : ndarray
Last column of 1, i.e. intercept.
y : ndarray
coef_unpenalized : ndarray
Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
case of ambiguity)
Last coefficient is intercept.
coef_penalized : ndarray
GLM solution with alpha=l2_reg_strength=1, i.e.
min 1/n * sum(loss) + ||w[:-1]||_2^2.
Last coefficient is intercept.
l2_reg_strength : float
Always equal 1.
"""
data_type, model = request.param
# Make the larger dimension more than twice as big as the smaller one.
# This helps when constructing singular matrices like (X, X).
if data_type == "long":
n_samples, n_features = 12, 4
else:
n_samples, n_features = 4, 12
k = min(n_samples, n_features)
rng = np.random.RandomState(global_random_seed)
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features,
effective_rank=k,
tail_strength=0.1,
random_state=rng,
)
X[:, -1] = 1 # last column acts as intercept
U, s, Vt = linalg.svd(X, full_matrices=False)
assert np.all(s > 1e-3) # to be sure
assert np.max(s) / np.min(s) < 100 # condition number of X
if data_type == "long":
coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
coef_unpenalized *= rng.choice([-1, 1], size=n_features)
raw_prediction = X @ coef_unpenalized
else:
raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
# minimum norm solution min ||w||_2 such that raw_prediction = X w:
# w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction
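# Because X has full row rank here, X @ coef_unpenalized reproduces raw_prediction
# (up to numerical error) and coef_unpenalized is the smallest-norm coefficient
# vector that does so.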
linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
sw = np.full(shape=n_samples, fill_value=1 / n_samples)
y = linear_loss.base_loss.link.inverse(raw_prediction)
# Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
# optimizer. Note that the problem is well conditioned such that we get accurate
# results.
l2_reg_strength = 1
fun = partial(
linear_loss.loss,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
grad = partial(
linear_loss.gradient,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
coef_penalized_with_intercept = _special_minimize(
fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
)
linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
fun = partial(
linear_loss.loss,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
grad = partial(
linear_loss.gradient,
X=X[:, :-1],
y=y,
sample_weight=sw,
l2_reg_strength=l2_reg_strength,
)
coef_penalized_without_intercept = _special_minimize(
fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
)
# To be sure
assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
coef_unpenalized
)
return (
model,
X,
y,
coef_unpenalized,
coef_penalized_with_intercept,
coef_penalized_without_intercept,
l2_reg_strength,
)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_glm_regression(solver, fit_intercept, glm_dataset):
"""Test that GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
"""
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
model.fit(X, y)
rtol = 5e-5 if solver == "lbfgs" else 1e-9
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
# Same with sample_weight.
model = (
clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
)
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset):
"""Test that GLM converges for all solvers to correct solution on hstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
For long X, [X, X] is still a long but singular matrix.
"""
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha / 2,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
with warnings.catch_warnings():
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.simplefilter("ignore", ConvergenceWarning)
model.fit(X, y)
rtol = 2e-4 if solver == "lbfgs" else 5e-9
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
"""Test that GLM converges for all solvers to correct solution on vstacked data.
We work with a simple constructed data set with known solution.
Fit on [X], [y] with alpha is the same as a fit on the vertically stacked data
[X; X], [y; y] with 1 * alpha, because the average loss stays the same.
For wide X, [X', X'] is a singular matrix.
"""
model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
n_samples, n_features = X.shape
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
if fit_intercept:
coef = coef_with_intercept
intercept = coef[-1]
coef = coef[:-1]
else:
coef = coef_without_intercept
intercept = 0
model.fit(X, y)
rtol = 3e-5 if solver == "lbfgs" else 5e-9
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
"""Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to w = argmin deviance(X, y, w)
"""
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
with warnings.catch_warnings():
if solver.startswith("newton") and n_samples < n_features:
# The newton solvers should warn and automatically fallback to LBFGS
# in this case. The model should still converge.
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. Most current GLM solvers do
# NOT return the minimum norm solution with fit_intercept=True.
if n_samples > n_features:
rtol = 5e-5 if solver == "lbfgs" else 1e-7
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
rtol = 5e-5
if solver == "newton-cholesky":
rtol = 5e-4
assert_allclose(model.predict(X), y, rtol=rtol)
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
if solver == "newton-cholesky":
# XXX: This solver shows random behaviour. Sometimes it finds solutions
# with norm_model <= norm_solution! So we check conditionally.
if norm_model < (1 + 1e-12) * norm_solution:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
elif solver == "lbfgs" and fit_intercept:
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
assert norm_model > (1 + 1e-12) * norm_solution
# See https://github.com/scikit-learn/scikit-learn/issues/23670.
# Note: Even adding a tiny penalty does not give the minimal norm solution.
# XXX: We could have naively expected LBFGS to find the minimal norm
# solution by adding a very small penalty. Even that fails for a reason we
# do not properly understand at this point.
else:
# When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
# solution on this problem.
# XXX: Do we have any theoretical guarantees why this should be the case?
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
"""Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
GLM fit on [X] is the same as fit on [X, X]/2.
For long X, [X, X] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
"""
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
intercept = coef[-1]
coef = coef[:-1]
if n_samples > n_features:
X = X[:, :-1] # remove intercept
X = 0.5 * np.concatenate((X, X), axis=1)
else:
# To know the minimum norm solution, we keep one intercept column and do
# not divide by 2. Later on, we must take special care.
X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]
else:
intercept = 0
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
with warnings.catch_warnings():
if solver.startswith("newton"):
# The newton solvers should warn and automatically fallback to LBFGS
# in this case. The model should still converge.
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
if fit_intercept and n_samples < n_features:
# Here we take special care.
model_intercept = 2 * model.intercept_
model_coef = 2 * model.coef_[:-1] # exclude the other intercept term.
# For minimum norm solution, we would have
# assert model.intercept_ == pytest.approx(model.coef_[-1])
else:
model_intercept = model.intercept_
model_coef = model.coef_
if n_samples > n_features:
assert model_intercept == pytest.approx(intercept)
rtol = 1e-4
assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
rtol = 1e-6 if solver == "lbfgs" else 5e-6
assert_allclose(model.predict(X), y, rtol=rtol)
if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky":
# Same as in test_glm_regression_unpenalized.
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
norm_solution = np.linalg.norm(
0.5 * np.r_[intercept, intercept, coef, coef]
)
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
assert norm_model > (1 + 1e-12) * norm_solution
# For minimum norm solution, we would have
# assert model.intercept_ == pytest.approx(model.coef_[-1])
else:
assert model_intercept == pytest.approx(intercept, rel=5e-6)
assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset):
"""Test that unpenalized GLM converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
GLM fit on [X], [y] is the same as a fit on the vertically stacked data
[X; X], [y; y].
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to w = argmin deviance(X, y, w)
"""
model, X, y, coef, _, _, _ = glm_dataset
n_samples, n_features = X.shape
alpha = 0 # unpenalized
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-12,
max_iter=1000,
)
model = clone(model).set_params(**params)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
with warnings.catch_warnings():
if solver.startswith("newton") and n_samples < n_features:
# The newton solvers should warn and automatically fallback to LBFGS
# in this case. The model should still converge.
warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
# XXX: Investigate if the ConvergenceWarning that can appear in some
# cases should be considered a bug or not. In the meantime we don't
# fail when the assertions below pass irrespective of the presence of
# the warning.
warnings.filterwarnings("ignore", category=ConvergenceWarning)
model.fit(X, y)
if n_samples > n_features:
rtol = 5e-5 if solver == "lbfgs" else 1e-6
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=rtol)
else:
# As it is an underdetermined problem, prediction = y. The following shows that
# we get a solution, i.e. a (non-unique) minimum of the objective function ...
rtol = 1e-6 if solver == "lbfgs" else 5e-6
assert_allclose(model.predict(X), y, rtol=rtol)
norm_solution = np.linalg.norm(np.r_[intercept, coef])
norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
if solver == "newton-cholesky":
# XXX: This solver shows random behaviour. Sometimes it finds solutions
# with norm_model <= norm_solution! So we check conditionally.
if not (norm_model > (1 + 1e-12) * norm_solution):
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef, rtol=1e-4)
elif solver == "lbfgs" and fit_intercept:
# Same as in test_glm_regression_unpenalized.
# But it is not the minimum norm solution. Otherwise the norms would be
# equal.
assert norm_model > (1 + 1e-12) * norm_solution
else:
rtol = 1e-5 if solver == "newton-cholesky" else 1e-4
assert model.intercept_ == pytest.approx(intercept, rel=rtol)
assert_allclose(model.coef_, coef, rtol=rtol)
def test_sample_weights_validation():
"""Test the raised errors in the validation of sample_weight."""
# scalar value but not positive
X = [[1]]
y = [1]
weights = 0
glm = _GeneralizedLinearRegressor()
# Positive weights are accepted
glm.fit(X, y, sample_weight=1)
# 2d array
weights = [[0]]
with pytest.raises(ValueError, match="must be 1D array or scalar"):
glm.fit(X, y, weights)
# 1d but wrong length
weights = [1, 0]
msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
with pytest.raises(ValueError, match=msg):
glm.fit(X, y, weights)
@pytest.mark.parametrize(
"glm",
[
TweedieRegressor(power=3),
PoissonRegressor(),
GammaRegressor(),
TweedieRegressor(power=1.5),
],
)
def test_glm_wrong_y_range(glm):
y = np.array([-1, 2])
X = np.array([[1], [1]])
msg = r"Some value\(s\) of y are out of the valid range of the loss"
with pytest.raises(ValueError, match=msg):
glm.fit(X, y)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_glm_identity_regression(fit_intercept):
"""Test GLM regression with identity link on a simple dataset."""
coef = [1.0, 2.0]
X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
y = np.dot(X, coef)
glm = _GeneralizedLinearRegressor(
alpha=0,
fit_intercept=fit_intercept,
tol=1e-12,
)
if fit_intercept:
glm.fit(X[:, 1:], y)
assert_allclose(glm.coef_, coef[1:], rtol=1e-10)
assert_allclose(glm.intercept_, coef[0], rtol=1e-10)
else:
glm.fit(X, y)
assert_allclose(glm.coef_, coef, rtol=1e-12)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("alpha", [0.0, 1.0])
@pytest.mark.parametrize(
"GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor]
)
def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
"""Test that the impact of sample_weight is consistent"""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
glm = GLMEstimator(**glm_params).fit(X, y)
coef = glm.coef_.copy()
# sample_weight=np.ones(..) should be equivalent to sample_weight=None
sample_weight = np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# sample_weight are normalized to 1, so scaling them has no effect
sample_weight = 2 * np.ones(y.shape)
glm.fit(X, y, sample_weight=sample_weight)
assert_allclose(glm.coef_, coef, rtol=1e-12)
# setting one element of sample_weight to 0 is equivalent to removing
# the corresponding sample
sample_weight = np.ones(y.shape)
sample_weight[-1] = 0
glm.fit(X, y, sample_weight=sample_weight)
coef1 = glm.coef_.copy()
glm.fit(X[:-1], y[:-1])
assert_allclose(glm.coef_, coef1, rtol=1e-12)
# check that multiplying sample_weight by 2 is equivalent
# to repeating corresponding samples twice
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
sample_weight_1 = np.ones(len(y))
sample_weight_1[: n_samples // 2] = 2
glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1)
glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None)
assert_allclose(glm1.coef_, glm2.coef_)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize(
"estimator",
[
PoissonRegressor(),
GammaRegressor(),
TweedieRegressor(power=3.0),
TweedieRegressor(power=0, link="log"),
TweedieRegressor(power=1.5),
TweedieRegressor(power=4.5),
],
)
def test_glm_log_regression(solver, fit_intercept, estimator):
"""Test GLM regression with log link on a simple dataset."""
coef = [0.2, -0.1]
X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T
y = np.exp(np.dot(X, coef))
glm = clone(estimator).set_params(
alpha=0,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-8,
)
if fit_intercept:
res = glm.fit(X[:, :-1], y)
assert_allclose(res.coef_, coef[:-1], rtol=1e-6)
assert_allclose(res.intercept_, coef[-1], rtol=1e-6)
else:
res = glm.fit(X, y)
assert_allclose(res.coef_, coef, rtol=2e-6)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_warm_start(solver, fit_intercept, global_random_seed):
n_samples, n_features = 100, 10
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features - 2,
bias=fit_intercept * 1.0,
noise=1.0,
random_state=global_random_seed,
)
y = np.abs(y) # Poisson requires non-negative targets.
alpha = 1
params = {
"solver": solver,
"fit_intercept": fit_intercept,
"tol": 1e-10,
}
glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
glm1.fit(X, y)
glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
# We intentionally set max_iter=1, so the solver should raise a
# ConvergenceWarning.
with pytest.warns(ConvergenceWarning):
glm2.fit(X, y)
linear_loss = LinearModelLoss(
base_loss=glm1._get_loss(),
fit_intercept=fit_intercept,
)
sw = np.full_like(y, fill_value=1 / n_samples)
objective_glm1 = linear_loss.loss(
coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
X=X,
y=y,
sample_weight=sw,
l2_reg_strength=alpha,
)
objective_glm2 = linear_loss.loss(
coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
X=X,
y=y,
sample_weight=sw,
l2_reg_strength=alpha,
)
assert objective_glm1 < objective_glm2
glm2.set_params(max_iter=1000)
glm2.fit(X, y)
# The two models are not exactly identical since the lbfgs solver
# computes the approximate hessian from previous iterations, which
# will not be strictly identical in the case of a warm start.
rtol = 2e-4 if solver == "lbfgs" else 1e-9
assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol)
assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("sample_weight", [None, True])
def test_normal_ridge_comparison(
n_samples, n_features, fit_intercept, sample_weight, request
):
"""Compare with Ridge regression for Normal distributions."""
test_size = 10
X, y = make_regression(
n_samples=n_samples + test_size,
n_features=n_features,
n_informative=n_features - 2,
noise=0.5,
random_state=42,
)
if n_samples > n_features:
ridge_params = {"solver": "svd"}
else:
ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
(
X_train,
X_test,
y_train,
y_test,
) = train_test_split(X, y, test_size=test_size, random_state=0)
alpha = 1.0
if sample_weight is None:
sw_train = None
alpha_ridge = alpha * n_samples
else:
sw_train = np.random.RandomState(0).rand(len(y_train))
alpha_ridge = alpha * sw_train.sum()
# GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2
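# Multiplying the GLM objective by 2 * n_samples gives Loss + alpha * n_samples * L2,
# which matches the Ridge objective; hence alpha_ridge = alpha * n_samples above
# (or alpha * sum(sample_weight) in the weighted case).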
ridge = Ridge(
alpha=alpha_ridge,
random_state=42,
fit_intercept=fit_intercept,
**ridge_params,
)
ridge.fit(X_train, y_train, sample_weight=sw_train)
glm = _GeneralizedLinearRegressor(
alpha=alpha,
fit_intercept=fit_intercept,
max_iter=300,
tol=1e-5,
)
glm.fit(X_train, y_train, sample_weight=sw_train)
assert glm.coef_.shape == (X.shape[1],)
assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"])
def test_poisson_glmnet(solver):
"""Compare Poisson regression with L2 regularization and LogLink to glmnet"""
# library("glmnet")
# options(digits=10)
# df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
# x <- data.matrix(df[,c("a", "b")])
# y <- df$y
# fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
# standardize=F, thresh=1e-10, nlambda=10000)
# coef(fit, s=1)
# (Intercept) -0.12889386979
# a 0.29019207995
# b 0.03741173122
X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T
y = np.array([0, 1, 1, 2])
glm = PoissonRegressor(
alpha=1,
fit_intercept=True,
tol=1e-7,
max_iter=300,
solver=solver,
)
glm.fit(X, y)
assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
def test_convergence_warning(regression_data):
X, y = regression_data
est = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
with pytest.warns(ConvergenceWarning):
est.fit(X, y)
@pytest.mark.parametrize(
"name, link_class", [("identity", IdentityLink), ("log", LogLink)]
)
def test_tweedie_link_argument(name, link_class):
"""Test GLM link argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = TweedieRegressor(power=1, link=name).fit(X, y)
assert isinstance(glm._base_loss.link, link_class)
@pytest.mark.parametrize(
"power, expected_link_class",
[
(0, IdentityLink), # normal
(1, LogLink), # poisson
(2, LogLink), # gamma
(3, LogLink), # inverse-gaussian
],
)
def test_tweedie_link_auto(power, expected_link_class):
"""Test that link='auto' delivers the expected link function"""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
glm = TweedieRegressor(link="auto", power=power).fit(X, y)
assert isinstance(glm._base_loss.link, expected_link_class)
@pytest.mark.parametrize("power", [0, 1, 1.5, 2, 3])
@pytest.mark.parametrize("link", ["log", "identity"])
def test_tweedie_score(regression_data, power, link):
"""Test that GLM score equals d2_tweedie_score for Tweedie losses."""
X, y = regression_data
# make y positive
y = np.abs(y) + 1.0
glm = TweedieRegressor(power=power, link=link).fit(X, y)
assert glm.score(X, y) == pytest.approx(
d2_tweedie_score(y, glm.predict(X), power=power)
)
@pytest.mark.parametrize(
"estimator, value",
[
(PoissonRegressor(), True),
(GammaRegressor(), True),
(TweedieRegressor(power=1.5), True),
(TweedieRegressor(power=0), False),
],
)
def test_tags(estimator, value):
assert estimator._get_tags()["requires_positive_y"] is value
# TODO(1.3): remove
@pytest.mark.parametrize(
"est, family",
[
(PoissonRegressor(), "poisson"),
(GammaRegressor(), "gamma"),
(TweedieRegressor(), TweedieDistribution()),
(TweedieRegressor(power=2), TweedieDistribution(power=2)),
(TweedieRegressor(power=3), TweedieDistribution(power=3)),
],
)
def test_family_deprecation(est, family):
"""Test backward compatibility of the family property."""
with pytest.warns(FutureWarning, match="`family` was deprecated"):
if isinstance(family, str):
assert est.family == family
else:
assert est.family.__class__ == family.__class__
assert est.family.power == family.power
def test_linalg_warning_with_newton_solver(global_random_seed):
newton_solver = "newton-cholesky"
rng = np.random.RandomState(global_random_seed)
# Use at least 20 samples to reduce the likelihood of getting a degenerate
# dataset for any global_random_seed.
X_orig = rng.normal(size=(20, 3))
y = rng.poisson(
np.exp(X_orig @ np.ones(X_orig.shape[1])), size=X_orig.shape[0]
).astype(np.float64)
# Collinear variation of the same input features.
X_collinear = np.hstack([X_orig] * 10)
# Let's consider the deviance of a constant baseline on this problem.
baseline_pred = np.full_like(y, y.mean())
constant_model_deviance = mean_poisson_deviance(y, baseline_pred)
assert constant_model_deviance > 1.0
# No warning raised on well-conditioned design, even without regularization.
tol = 1e-10
with warnings.catch_warnings():
warnings.simplefilter("error")
reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(X_orig, y)
original_newton_deviance = mean_poisson_deviance(y, reg.predict(X_orig))
# On this dataset, we should have enough data points that a near-zero deviance
# is not attainable (for any of the admissible random seeds). This will make it
# easier to interpret the meaning of rtol in the subsequent assertions:
assert original_newton_deviance > 0.2
# We check that the model could successfully fit information in X_orig to
# improve upon the constant baseline by a large margin (when evaluated on
# the training set).
assert constant_model_deviance - original_newton_deviance > 0.1
# LBFGS is robust to a collinear design because its approximation of the
# Hessian is Symmetric Positive Definite by construction. Let's record its
# solution.
with warnings.catch_warnings():
warnings.simplefilter("error")
reg = PoissonRegressor(solver="lbfgs", alpha=0.0, tol=tol).fit(X_collinear, y)
collinear_lbfgs_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
# The LBFGS solution on the collinear data is expected to be comparable to the
# Newton solution on the original data.
rtol = 1e-6
assert collinear_lbfgs_deviance == pytest.approx(original_newton_deviance, rel=rtol)
# Fitting a Newton solver on the collinear version of the training data
# without regularization should raise an informative warning and fallback
# to the LBFGS solver.
msg = (
"The inner solver of .*Newton.*Solver stumbled upon a singular or very "
"ill-conditioned Hessian matrix"
)
with pytest.warns(scipy.linalg.LinAlgWarning, match=msg):
reg = PoissonRegressor(solver=newton_solver, alpha=0.0, tol=tol).fit(
X_collinear, y
)
# As a result we should still automatically converge to a good solution.
collinear_newton_deviance = mean_poisson_deviance(y, reg.predict(X_collinear))
assert collinear_newton_deviance == pytest.approx(
original_newton_deviance, rel=rtol
)
# Increasing the regularization slightly should make the problem go away:
with warnings.catch_warnings():
warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
reg = PoissonRegressor(solver=newton_solver, alpha=1e-10).fit(X_collinear, y)
# The slightly penalized model on the collinear data should be close enough
# to the unpenalized model on the original data.
penalized_collinear_newton_deviance = mean_poisson_deviance(
y, reg.predict(X_collinear)
)
assert penalized_collinear_newton_deviance == pytest.approx(
original_newton_deviance, rel=rtol
)
@pytest.mark.parametrize("verbose", [0, 1, 2])
def test_newton_solver_verbosity(capsys, verbose):
"""Test the std output of verbose newton solvers."""
y = np.array([1, 2], dtype=float)
X = np.array([[1.0, 0], [0, 1]], dtype=float)
linear_loss = LinearModelLoss(base_loss=HalfPoissonLoss(), fit_intercept=False)
sol = NewtonCholeskySolver(
coef=linear_loss.init_zero_coef(X),
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
sol.solve(X, y, None) # returns array([0., 0.69314758])
captured = capsys.readouterr()
if verbose == 0:
assert captured.out == ""
else:
msg = [
"Newton iter=1",
"Check Convergence",
"1. max |gradient|",
"2. Newton decrement",
"Solver did converge at loss = ",
]
for m in msg:
assert m in captured.out
if verbose >= 2:
msg = ["Backtracking Line Search", "line search iteration="]
for m in msg:
assert m in captured.out
# Set the Newton solver to a state with a completely wrong Newton step.
sol = NewtonCholeskySolver(
coef=linear_loss.init_zero_coef(X),
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
sol.setup(X=X, y=y, sample_weight=None)
sol.iteration = 1
sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
sol.coef_newton = np.array([1.0, 0])
sol.gradient_times_newton = sol.gradient @ sol.coef_newton
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
sol.line_search(X=X, y=y, sample_weight=None)
captured = capsys.readouterr()
if verbose >= 1:
assert (
"Line search did not converge and resorts to lbfgs instead." in captured.out
)
# Set the Newton solver to a state with bad Newton step such that the loss
# improvement in line search is tiny.
sol = NewtonCholeskySolver(
coef=np.array([1e-12, 0.69314758]),
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
sol.setup(X=X, y=y, sample_weight=None)
sol.iteration = 1
sol.update_gradient_hessian(X=X, y=y, sample_weight=None)
sol.coef_newton = np.array([1e-6, 0])
sol.gradient_times_newton = sol.gradient @ sol.coef_newton
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
sol.line_search(X=X, y=y, sample_weight=None)
captured = capsys.readouterr()
if verbose >= 2:
msg = [
"line search iteration=",
"check loss improvement <= armijo term:",
"check loss |improvement| <= eps * |loss_old|:",
"check sum(|gradient|) < sum(|gradient_old|):",
]
for m in msg:
assert m in captured.out
# Test for a case with negative hessian. We badly initialize coef for a Tweedie
# loss with non-canonical link, e.g. Inverse Gaussian deviance with a log link.
linear_loss = LinearModelLoss(
base_loss=HalfTweedieLoss(power=3), fit_intercept=False
)
sol = NewtonCholeskySolver(
coef=linear_loss.init_zero_coef(X) + 1,
linear_loss=linear_loss,
l2_reg_strength=0,
verbose=verbose,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
sol.solve(X, y, None)
captured = capsys.readouterr()
if verbose >= 1:
assert (
"The inner solver detected a pointwise Hessian with many negative values"
" and resorts to lbfgs instead."
in captured.out
)
| bsd-3-clause | 4e03613467b77618e2c26114900d872c | 35.518486 | 88 | 0.625913 | 3.437034 | false | true | false | false |
scikit-learn/scikit-learn | examples/miscellaneous/plot_partial_dependence_visualization_api.py | 8 | 5363 | """
=========================================
Advanced Plotting With Partial Dependence
=========================================
The :class:`~sklearn.inspection.PartialDependenceDisplay` object can be used
for plotting without needing to recalculate the partial dependence. In this
example, we show how to plot partial dependence plots and how to quickly
customize the plot with the visualization API.
.. note::
See also :ref:`sphx_glr_auto_examples_miscellaneous_plot_roc_curve_visualization_api.py`
""" # noqa: E501
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_diabetes
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeRegressor
from sklearn.inspection import PartialDependenceDisplay
# %%
# Train models on the diabetes dataset
# ================================================
#
# First, we train a decision tree and a multi-layer perceptron on the diabetes
# dataset.
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
y = diabetes.target
tree = DecisionTreeRegressor()
mlp = make_pipeline(
StandardScaler(),
MLPRegressor(hidden_layer_sizes=(100, 100), tol=1e-2, max_iter=500, random_state=0),
)
tree.fit(X, y)
mlp.fit(X, y)
# %%
# Plotting partial dependence for two features
# ============================================
#
# We plot partial dependence curves for features "age" and "bmi" (body mass
# index) for the decision tree. With two features,
# :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` expects to plot
# two curves. Here the plotting function places a grid of two plots using the space
# defined by `ax`.
fig, ax = plt.subplots(figsize=(12, 6))
ax.set_title("Decision Tree")
tree_disp = PartialDependenceDisplay.from_estimator(tree, X, ["age", "bmi"], ax=ax)
# %%
# The partial dependence curves can be plotted for the multi-layer perceptron.
# In this case, `line_kw` is passed to
# :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to change the
# color of the curve.
fig, ax = plt.subplots(figsize=(12, 6))
ax.set_title("Multi-layer Perceptron")
mlp_disp = PartialDependenceDisplay.from_estimator(
mlp, X, ["age", "bmi"], ax=ax, line_kw={"color": "red"}
)
# %%
# Plotting partial dependence of the two models together
# ======================================================
#
# The `tree_disp` and `mlp_disp`
# :class:`~sklearn.inspection.PartialDependenceDisplay` objects contain all the
# computed information needed to recreate the partial dependence curves. This
# means we can easily create additional plots without needing to recompute the
# curves.
#
# One way to plot the curves is to place them in the same figure, with the
# curves of each model in its own row. First, we create a figure with two axes
# arranged in two rows and one column. The two axes are passed to the
# :func:`~sklearn.inspection.PartialDependenceDisplay.plot` functions of
# `tree_disp` and `mlp_disp`. The given axes will be used by the plotting
# function to draw the partial dependence. The resulting plot places the
# decision tree partial dependence curves in the first row and the
# multi-layer perceptron curves in the second row.
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10))
tree_disp.plot(ax=ax1)
ax1.set_title("Decision Tree")
mlp_disp.plot(ax=ax2, line_kw={"color": "red"})
ax2.set_title("Multi-layer Perceptron")
# %%
# Another way to compare the curves is to plot them on top of each other. Here,
# we create a figure with one row and two columns. The axes are passed into the
# :func:`~sklearn.inspection.PartialDependenceDisplay.plot` function as a list,
# which will plot the partial dependence curves of each model on the same axes.
# The length of the axes list must be equal to the number of plots drawn.
# sphinx_gallery_thumbnail_number = 4
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
tree_disp.plot(ax=[ax1, ax2], line_kw={"label": "Decision Tree"})
mlp_disp.plot(
ax=[ax1, ax2], line_kw={"label": "Multi-layer Perceptron", "color": "red"}
)
ax1.legend()
ax2.legend()
# %%
# `tree_disp.axes_` is a numpy array containing the axes used to draw the
# partial dependence plots. This can be passed to `mlp_disp` to have the same
# effect of drawing the plots on top of each other. Furthermore, the
# `mlp_disp.figure_` stores the figure, which allows for resizing the figure
# after calling `plot`. In this case `tree_disp.axes_` has two dimensions, thus
# `plot` will only show the y label and y ticks on the leftmost plot.
tree_disp.plot(line_kw={"label": "Decision Tree"})
mlp_disp.plot(
line_kw={"label": "Multi-layer Perceptron", "color": "red"}, ax=tree_disp.axes_
)
tree_disp.figure_.set_size_inches(10, 6)
tree_disp.axes_[0, 0].legend()
tree_disp.axes_[0, 1].legend()
plt.show()
# %%
# Plotting partial dependence for one feature
# ===========================================
#
# Here, we plot the partial dependence curves for a single feature, "age", on
# the same axes. In this case, `tree_disp.axes_` is passed into the second
# plot function.
tree_disp = PartialDependenceDisplay.from_estimator(tree, X, ["age"])
mlp_disp = PartialDependenceDisplay.from_estimator(
mlp, X, ["age"], ax=tree_disp.axes_, line_kw={"color": "red"}
)
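# %%
# Outside of an interactive environment, an explicit call to ``plt.show()`` is
# needed to display this last figure as well.
plt.show()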
| bsd-3-clause | b4fa6bc53b11de2d2ce5e9b04f1ec74e | 38.145985 | 92 | 0.701473 | 3.431222 | false | false | false | false |
scikit-learn/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 13 | 2219 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and an anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = 0.02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
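# The first kernel shares a single length-scale across both features (isotropic),
# while the second learns a separate length-scale per feature (anisotropic).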
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y], edgecolors=(0, 0, 0))
plt.xlabel("Sepal length")
plt.ylabel("Sepal width")
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(
"%s, LML: %.3f" % (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta))
)
plt.tight_layout()
plt.show()
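# %%
# For reference, the fitted kernels and their log-marginal-likelihood values can
# also be inspected directly after fitting:
print(gpc_rbf_isotropic.kernel_)
print(gpc_rbf_isotropic.log_marginal_likelihood(gpc_rbf_isotropic.kernel_.theta))
print(gpc_rbf_anisotropic.kernel_)
print(gpc_rbf_anisotropic.log_marginal_likelihood(gpc_rbf_anisotropic.kernel_.theta))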
| bsd-3-clause | 4eac018b8c0219b3c67838e2ef6b05bd | 34.790323 | 87 | 0.635872 | 3.023161 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/utils/multiclass.py | 8 | 17679 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from collections.abc import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from .validation import check_array, _assert_all_finite
from ..utils._array_api import get_namespace
def _unique_multiclass(y):
xp, is_array_api = get_namespace(y)
if hasattr(y, "__array__") or is_array_api:
return xp.unique_values(xp.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(
check_array(y, input_name="y", accept_sparse=["csr", "csc", "coo"]).shape[1]
)
_FN_UNIQUE_LABELS = {
"binary": _unique_multiclass,
"multiclass": _unique_multiclass,
"multilabel-indicator": _unique_indicator,
}
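# Dispatch table mapping the detected target type to the helper that extracts the
# unique labels for that format; used by `unique_labels` below.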
def unique_labels(*ys):
"""Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
xp, is_array_api = get_namespace(*ys)
if not ys:
raise ValueError("No argument has been passed.")
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (
label_type == "multilabel-indicator"
and len(
set(
check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
)
)
> 1
):
raise ValueError(
"Multi-label binary indicator input with different numbers of labels"
)
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
if is_array_api:
# array_api does not allow for mixed dtypes
unique_ys = xp.concat([_unique_labels(y) for y in ys])
return xp.unique_values(unique_ys)
ys_labels = set(chain.from_iterable((i for i in _unique_labels(y)) for y in ys))
# Check that we don't mix string type with number type
if len(set(isinstance(label, str) for label in ys_labels)) > 1:
raise ValueError("Mix of label input types (string and number)")
return xp.asarray(sorted(ys_labels))
def _is_integral_float(y):
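# True only for float arrays whose values are all whole numbers,
# e.g. [0.0, 1.0, 1.0] -> True and [0.0, 0.5] -> False.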
return y.dtype.kind == "f" and np.all(y.astype(int) == y)
def is_multilabel(y):
"""Check if ``y`` is in a multilabel format.
Parameters
----------
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
out : bool
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
xp, is_array_api = get_namespace(y)
if hasattr(y, "__array__") or isinstance(y, Sequence) or is_array_api:
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
with warnings.catch_warnings():
warnings.simplefilter("error", np.VisibleDeprecationWarning)
try:
y = xp.asarray(y)
except (np.VisibleDeprecationWarning, ValueError):
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = xp.asarray(y, dtype=object)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
labels = xp.unique_values(y.data)
return (
len(y.data) == 0
or (labels.size == 1 or (labels.size == 2) and (0 in labels))
and (y.dtype.kind in "biu" or _is_integral_float(labels)) # bool, int, uint
)
else:
labels = xp.unique_values(y)
return len(labels) < 3 and (
y.dtype.kind in "biu" or _is_integral_float(labels) # bool, int, uint
)
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
Target values.
"""
y_type = type_of_target(y, input_name="y")
if y_type not in [
"binary",
"multiclass",
"multiclass-multioutput",
"multilabel-indicator",
"multilabel-sequences",
]:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y, input_name=""):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : {array-like, sparse matrix}
Target values. If a sparse matrix, `y` is expected to be a
CSR/CSC matrix.
input_name : str, default=""
The data name used to construct the error message.
.. versionadded:: 1.1.0
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
xp, is_array_api = get_namespace(y)
valid = (
(isinstance(y, Sequence) or issparse(y) or hasattr(y, "__array__"))
and not isinstance(y, str)
or is_array_api
)
if not valid:
raise ValueError(
"Expected array-like (array or non-string sequence), got %r" % y
)
sparse_pandas = y.__class__.__name__ in ["SparseSeries", "SparseArray"]
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return "multilabel-indicator"
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
# We therefore catch both deprecation (NumPy < 1.24) warning and
# value error (NumPy >= 1.24).
with warnings.catch_warnings():
warnings.simplefilter("error", np.VisibleDeprecationWarning)
if not issparse(y):
try:
y = xp.asarray(y)
except (np.VisibleDeprecationWarning, ValueError):
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = xp.asarray(y, dtype=object)
# The old sequence of sequences format
try:
if (
not hasattr(y[0], "__array__")
and isinstance(y[0], Sequence)
and not isinstance(y[0], str)
):
raise ValueError(
"You appear to be using a legacy multi-label data"
" representation. Sequence of sequences are no"
" longer supported; use a binary array or sparse"
" matrix instead - the MultiLabelBinarizer"
" transformer can convert to this format."
)
except IndexError:
pass
# Invalid inputs
if y.ndim not in (1, 2):
# Number of dimension greater than 2: [[[1, 2]]]
return "unknown"
if not min(y.shape):
# Empty ndarray: []/[[]]
if y.ndim == 1:
# 1-D empty array: []
return "binary" # []
# 2-D empty array: [[]]
return "unknown"
if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):
# [obj_1] and not ["label_1"]
return "unknown"
# Check if multioutput
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# Check float and contains non-integer float values
if y.dtype.kind == "f":
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
data = y.data if issparse(y) else y
if xp.any(data != data.astype(int)):
_assert_all_finite(data, input_name=input_name)
return "continuous" + suffix
# Check multiclass
first_row = y[0] if not issparse(y) else y.getrow(0).data
if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1):
# [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
return "multiclass" + suffix
else:
return "binary" # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic.
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, "classes_", None) is None and classes is None:
raise ValueError("classes must be passed on the first call to partial_fit.")
elif classes is not None:
if getattr(clf, "classes_", None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_)
)
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data.
Parameters
----------
y : {array-like, sparse matrix} of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
classes : list of size n_outputs of ndarray of size (n_classes,)
List of classes for each column.
n_classes : list of int of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of ndarray of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k] : y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = sample_weight[col_nonzero]
zeros_samp_weight_sum = np.sum(sample_weight) - np.sum(nz_samp_weight)
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(
y.data[y.indptr[k] : y.indptr[k + 1]], return_inverse=True
)
class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0, zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = np.bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
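# Illustrative sketch (not executed): a small dense multioutput target and the
# statistics class_distribution returns for it; the numbers follow directly
# from counting the labels in each column.
#
#     >>> import numpy as np
#     >>> y = np.array([[1, 0],
#     ...               [2, 0],
#     ...               [1, 3]])
#     >>> classes, n_classes, priors = class_distribution(y)  # doctest: +SKIP
#     >>> classes    # [array([1, 2]), array([0, 3])]
#     >>> n_classes  # [2, 2]
#     >>> priors     # [array([0.667, 0.333]), array([0.667, 0.333])]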
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
        ``n_classes * (n_classes - 1) / 2``.
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
    # Monotonically transform the sum_of_confidences to (-1/3, 1/3)
    # and add it to the votes. The monotonic transformation is
    # f: x -> x / (3 * (|x| + 1)); it uses 1/3 instead of 1/2
    # to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = sum_of_confidences / (
3 * (np.abs(sum_of_confidences) + 1)
)
return votes + transformed_confidences
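# Illustrative sketch: why the transform above cannot flip a vote-based
# ordering. f(x) = x / (3 * (|x| + 1)) stays strictly inside (-1/3, 1/3) for
# any finite x, so classes separated by at least one full vote keep their
# relative order after the confidences are added:
#
#     >>> import numpy as np
#     >>> x = np.array([-1e6, -1.0, 0.0, 1.0, 1e6])
#     >>> f = x / (3 * (np.abs(x) + 1))
#     >>> bool(np.all(np.abs(f) < 1 / 3))
#     True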
| bsd-3-clause | 166ddc7c1da004c4f839965af38aa5c0 | 32.932821 | 88 | 0.58363 | 3.708622 | false | false | false | false |
scikit-learn/scikit-learn | examples/compose/plot_digits_pipe.py | 11 | 2512 | # -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
# Define a pipeline to search for the best combination of PCA truncation
# and classifier regularization.
pca = PCA()
# Define a Standard Scaler to normalize inputs
scaler = StandardScaler()
# set the tolerance to a large value to make the example faster
logistic = LogisticRegression(max_iter=10000, tol=0.1)
pipe = Pipeline(steps=[("scaler", scaler), ("pca", pca), ("logistic", logistic)])
X_digits, y_digits = datasets.load_digits(return_X_y=True)
# Parameters of pipelines can be set using '__' separated parameter names:
param_grid = {
"pca__n_components": [5, 15, 30, 45, 60],
"logistic__C": np.logspace(-4, 4, 4),
}
search = GridSearchCV(pipe, param_grid, n_jobs=2)
search.fit(X_digits, y_digits)
print("Best parameter (CV score=%0.3f):" % search.best_score_)
print(search.best_params_)
# Plot the PCA spectrum
pca.fit(X_digits)
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax0.plot(
np.arange(1, pca.n_components_ + 1), pca.explained_variance_ratio_, "+", linewidth=2
)
ax0.set_ylabel("PCA explained variance ratio")
ax0.axvline(
search.best_estimator_.named_steps["pca"].n_components,
linestyle=":",
label="n_components chosen",
)
ax0.legend(prop=dict(size=12))
# For each number of components, find the best classifier results
results = pd.DataFrame(search.cv_results_)
components_col = "param_pca__n_components"
best_clfs = results.groupby(components_col).apply(
lambda g: g.nlargest(1, "mean_test_score")
)
best_clfs.plot(
x=components_col, y="mean_test_score", yerr="std_test_score", legend=False, ax=ax1
)
ax1.set_ylabel("Classification accuracy (val)")
ax1.set_xlabel("n_components")
plt.xlim(-1, 70)
plt.tight_layout()
plt.show()
| bsd-3-clause | c1f940ce832b1fbb971d89d233f8c041 | 29.621951 | 88 | 0.702111 | 3.425648 | false | false | false | false |
scikit-learn/scikit-learn | examples/cluster/plot_affinity_propagation.py | 8 | 2180 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets import make_blobs
# %%
# Generate sample data
# --------------------
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=300, centers=centers, cluster_std=0.5, random_state=0
)
# %%
# Compute Affinity Propagation
# ----------------------------
af = AffinityPropagation(preference=-50, random_state=0).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print("Estimated number of clusters: %d" % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels))
print(
"Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels)
)
print(
"Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric="sqeuclidean")
)
# %%
# Plot result
# -----------
import matplotlib.pyplot as plt
plt.close("all")
plt.figure(1)
plt.clf()
colors = plt.cycler("color", plt.cm.viridis(np.linspace(0, 1, 4)))
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.scatter(
X[class_members, 0], X[class_members, 1], color=col["color"], marker="."
)
plt.scatter(
cluster_center[0], cluster_center[1], s=14, color=col["color"], marker="o"
)
for x in X[class_members]:
plt.plot(
[cluster_center[0], x[0]], [cluster_center[1], x[1]], color=col["color"]
)
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
| bsd-3-clause | f8ca295f3fb890e260ccd3fe44cdcc9e | 28.459459 | 86 | 0.634862 | 3.205882 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/neighbors/_kde.py | 9 | 12346 | """
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import itertools
from numbers import Integral, Real
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..neighbors._base import VALID_METRICS
from ..utils import check_random_state
from ..utils.validation import _check_sample_weight, check_is_fitted
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import row_norms
from ._ball_tree import BallTree, DTYPE
from ._kd_tree import KDTree
VALID_KERNELS = [
"gaussian",
"tophat",
"epanechnikov",
"exponential",
"linear",
"cosine",
]
TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation.
Read more in the :ref:`User Guide <kernel_density>`.
Parameters
----------
bandwidth : float or {"scott", "silverman"}, default=1.0
The bandwidth of the kernel. If bandwidth is a float, it defines the
bandwidth of the kernel. If bandwidth is a string, one of the estimation
        methods is used.
algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
The tree algorithm to use.
kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
'cosine'}, default='gaussian'
The kernel to use.
metric : str, default='euclidean'
Metric to use for distance computation. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
Not all metrics are valid with all algorithms: refer to the
documentation of :class:`BallTree` and :class:`KDTree`. Note that the
normalization of the density output is correct only for the Euclidean
distance metric.
atol : float, default=0
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution.
rtol : float, default=0
The desired relative tolerance of the result. A larger tolerance will
generally lead to faster execution.
breadth_first : bool, default=True
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int, default=40
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details.
metric_params : dict, default=None
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
tree_ : ``BinaryTree`` instance
The tree algorithm for fast generalized N-point problems.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
bandwidth_ : float
Value of the bandwidth, given directly by the bandwidth parameter or
estimated using the 'scott' or 'silverman' method.
.. versionadded:: 1.0
See Also
--------
sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
problems.
sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
problems.
Examples
--------
Compute a gaussian kernel density estimate with a fixed bandwidth.
>>> from sklearn.neighbors import KernelDensity
>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> X = rng.random_sample((100, 3))
>>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
>>> log_density = kde.score_samples(X[:3])
>>> log_density
array([-1.52955942, -1.51462041, -1.60244657])
"""
_parameter_constraints: dict = {
"bandwidth": [
Interval(Real, 0, None, closed="neither"),
StrOptions({"scott", "silverman"}),
],
"algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})],
"kernel": [StrOptions(set(VALID_KERNELS))],
"metric": [
StrOptions(
set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT.keys()]))
)
],
"atol": [Interval(Real, 0, None, closed="left")],
"rtol": [Interval(Real, 0, None, closed="left")],
"breadth_first": ["boolean"],
"leaf_size": [Interval(Integral, 1, None, closed="left")],
"metric_params": [None, dict],
}
def __init__(
self,
*,
bandwidth=1.0,
algorithm="auto",
kernel="gaussian",
metric="euclidean",
atol=0,
rtol=0,
breadth_first=True,
leaf_size=40,
metric_params=None,
):
self.algorithm = algorithm
self.bandwidth = bandwidth
self.kernel = kernel
self.metric = metric
self.atol = atol
self.rtol = rtol
self.breadth_first = breadth_first
self.leaf_size = leaf_size
self.metric_params = metric_params
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == "auto":
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return "kd_tree"
elif metric in BallTree.valid_metrics:
return "ball_tree"
else: # kd_tree or ball_tree
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError(
"invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric)
)
return algorithm
def fit(self, X, y=None, sample_weight=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : array-like of shape (n_samples,), default=None
List of sample weights attached to the data X.
.. versionadded:: 0.20
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
algorithm = self._choose_algorithm(self.algorithm, self.metric)
if isinstance(self.bandwidth, str):
if self.bandwidth == "scott":
self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
elif self.bandwidth == "silverman":
self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
-1 / (X.shape[1] + 4)
)
else:
self.bandwidth_ = self.bandwidth
X = self._validate_data(X, order="C", dtype=DTYPE)
if sample_weight is not None:
sample_weight = _check_sample_weight(
sample_weight, X, DTYPE, only_non_negative=True
)
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](
X,
metric=self.metric,
leaf_size=self.leaf_size,
sample_weight=sample_weight,
**kwargs,
)
return self
def score_samples(self, X):
"""Compute the log-likelihood of each sample under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray of shape (n_samples,)
Log-likelihood of each sample in `X`. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
check_is_fitted(self)
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = self._validate_data(X, order="C", dtype=DTYPE, reset=False)
if self.tree_.sample_weight is None:
N = self.tree_.data.shape[0]
else:
N = self.tree_.sum_weight
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X,
h=self.bandwidth_,
kernel=self.kernel,
atol=atol_N,
rtol=self.rtol,
breadth_first=self.breadth_first,
return_log=True,
)
log_density -= np.log(N)
return log_density
def score(self, X, y=None):
"""Compute the total log-likelihood under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
Returns
-------
logprob : float
Total log-likelihood of the data in X. This is normalized to be a
probability density, so the value will be low for high-dimensional
data.
"""
return np.sum(self.score_samples(X))
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Currently, this is implemented only for gaussian and tophat kernels.
Parameters
----------
n_samples : int, default=1
Number of samples to generate.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to generate
random samples. Pass an int for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array-like of shape (n_samples, n_features)
List of samples.
"""
check_is_fitted(self)
# TODO: implement sampling for other valid kernel shapes
if self.kernel not in ["gaussian", "tophat"]:
raise NotImplementedError()
data = np.asarray(self.tree_.data)
rng = check_random_state(random_state)
u = rng.uniform(0, 1, size=n_samples)
if self.tree_.sample_weight is None:
i = (u * data.shape[0]).astype(np.int64)
else:
cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
sum_weight = cumsum_weight[-1]
i = np.searchsorted(cumsum_weight, u * sum_weight)
if self.kernel == "gaussian":
return np.atleast_2d(rng.normal(data[i], self.bandwidth_))
elif self.kernel == "tophat":
# we first draw points from a d-dimensional normal distribution,
# then use an incomplete gamma function to map them to a uniform
# d-dimensional tophat distribution.
dim = data.shape[1]
X = rng.normal(size=(n_samples, dim))
s_sq = row_norms(X, squared=True)
correction = (
gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
* self.bandwidth_
/ np.sqrt(s_sq)
)
return data[i] + X * correction[:, np.newaxis]
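    # Illustrative sketch (not part of the estimator): the mapping used in
    # ``sample`` above turns standard-normal draws into points uniform in a
    # d-dimensional ball of radius ``bandwidth_``. The direction X / ||X|| is
    # uniform on the sphere, gammainc(d / 2, ||X||^2 / 2) is the chi-square
    # CDF of ||X||^2 and hence Uniform(0, 1), and its d-th root gives a radius
    # with density proportional to r**(d - 1), i.e. uniform in the ball:
    #
    #     >>> import numpy as np
    #     >>> from scipy.special import gammainc
    #     >>> rng = np.random.RandomState(0)
    #     >>> d, h = 3, 2.0
    #     >>> X = rng.normal(size=(10000, d))
    #     >>> s_sq = (X ** 2).sum(axis=1)
    #     >>> r = gammainc(0.5 * d, 0.5 * s_sq) ** (1.0 / d) * h
    #     >>> bool(np.all(r <= h))
    #     True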
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"sample_weight must have positive values"
),
}
}
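# Illustrative sketch (not executed): the data-driven bandwidths computed in
# ``fit`` for ``bandwidth="scott"`` and ``bandwidth="silverman"`` depend only
# on the training-set shape (n_samples, n_features); approximate values:
#
#     >>> n, d = 100, 3
#     >>> scott = n ** (-1 / (d + 4))                      # ~0.518
#     >>> silverman = (n * (d + 2) / 4) ** (-1 / (d + 4))  # ~0.502
#
# so fitting ``KernelDensity(bandwidth="scott")`` on an X of shape (100, 3)
# would set ``bandwidth_`` to roughly the first value.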
| bsd-3-clause | 439680ba9530c1b38f1c6a489db05502 | 32.917582 | 88 | 0.579864 | 4.207907 | false | false | false | false |
scikit-learn/scikit-learn | sklearn/ensemble/_iforest.py | 8 | 19672 | # Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numbers
import numpy as np
from scipy.sparse import issparse
from warnings import warn
from numbers import Integral, Real
from ..tree import ExtraTreeRegressor
from ..tree._tree import DTYPE as tree_dtype
from ..utils import (
check_random_state,
check_array,
gen_batches,
get_chunk_n_rows,
)
from ..utils._param_validation import Interval, StrOptions
from ..utils.validation import check_is_fitted, _num_samples
from ..base import OutlierMixin
from ._bagging import BaseBagging
__all__ = ["IsolationForest"]
class IsolationForest(OutlierMixin, BaseBagging):
"""
Isolation Forest Algorithm.
    Return the anomaly score of each sample using the IsolationForest algorithm.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, default=100
The number of base estimators in the ensemble.
max_samples : "auto", int or float, default="auto"
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the scores of the samples.
- If 'auto', the threshold is determined as in the
original paper.
- If float, the contamination should be in the range (0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max(1, int(max_features * n_features_in_))` features.
        Note: using a float number less than 1.0 or an integer less than the
        number of features will enable feature subsampling and lead to a
        longer runtime.
bootstrap : bool, default=False
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the pseudo-randomness of the selection of the feature
and split values for each branching step and each tree in the forest.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity of the tree building process.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.21
Attributes
----------
estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance
The child estimator template used to create the collection of
fitted sub-estimators.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
base_estimator_ : ExtraTreeRegressor instance
The child estimator template used to create the collection of
fitted sub-estimators.
.. deprecated:: 1.2
`base_estimator_` is deprecated and will be removed in 1.4.
Use `estimator_` instead.
estimators_ : list of ExtraTreeRegressor instances
The collection of fitted sub-estimators.
estimators_features_ : list of ndarray
The subset of drawn features for each base estimator.
estimators_samples_ : list of ndarray
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : int
The actual number of samples.
offset_ : float
Offset used to define the decision function from the raw scores. We
have the relation: ``decision_function = score_samples - offset_``.
``offset_`` is defined as follows. When the contamination parameter is
set to "auto", the offset is equal to -0.5 as the scores of inliers are
close to 0 and the scores of outliers are close to -1. When a
contamination parameter different than "auto" is provided, the offset
is defined in such a way we obtain the expected number of outliers
(samples with decision function < 0) in training.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
Gaussian distributed dataset.
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
using Local Outlier Factor (LOF).
Notes
-----
The implementation is based on an ensemble of ExtraTreeRegressor. The
maximum depth of each tree is set to ``ceil(log_2(n))`` where
:math:`n` is the number of samples used to build the tree
(see (Liu et al., 2008) for more details).
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
Examples
--------
>>> from sklearn.ensemble import IsolationForest
>>> X = [[-1.1], [0.3], [0.5], [100]]
>>> clf = IsolationForest(random_state=0).fit(X)
>>> clf.predict([[0.1], [0], [90]])
array([ 1, 1, -1])
"""
_parameter_constraints: dict = {
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"max_samples": [
StrOptions({"auto"}),
Interval(Integral, 1, None, closed="left"),
Interval(Real, 0, 1, closed="right"),
],
"contamination": [
StrOptions({"auto"}),
Interval(Real, 0, 0.5, closed="right"),
],
"max_features": [
Integral,
Interval(Real, 0, 1, closed="right"),
],
"bootstrap": ["boolean"],
"n_jobs": [Integral, None],
"random_state": ["random_state"],
"verbose": ["verbose"],
"warm_start": ["boolean"],
}
def __init__(
self,
*,
n_estimators=100,
max_samples="auto",
contamination="auto",
max_features=1.0,
bootstrap=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
):
super().__init__(
estimator=ExtraTreeRegressor(
max_features=1, splitter="random", random_state=random_state
),
            # Note: max_features above belongs to the base ExtraTreeRegressor
            # and is unrelated to self.max_features.
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
)
self.contamination = contamination
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def _parallel_args(self):
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
# a thread-based backend rather than a process-based backend so as
# to avoid suffering from communication overhead and extra memory
# copies.
return {"prefer": "threads"}
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
X = self._validate_data(X, accept_sparse=["csc"], dtype=tree_dtype)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
        # ensure that max_samples is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, str) and self.max_samples == "auto":
max_samples = min(256, n_samples)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # max_samples is float
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X,
y,
max_samples,
max_depth=max_depth,
sample_weight=sample_weight,
check_input=False,
)
if self.contamination == "auto":
            # 0.5 plays a special role as described in the original paper.
            # We take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)
return self
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self)
decision_func = self.decision_function(X)
is_inlier = np.ones_like(decision_func, dtype=int)
is_inlier[decision_func < 0] = -1
return is_inlier
def decision_function(self, X):
"""
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. If there are
        several observations n_left in the leaf, the average path length of
        an isolation tree built on n_left samples is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
"""
Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
        the number of splittings required to isolate this point. If there are
        several observations n_left in the leaf, the average path length of
        an isolation tree built on n_left samples is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self)
# Check data
X = self._validate_data(X, accept_sparse="csr", reset=False)
# Take the opposite of the scores as bigger is better (here less
# abnormal)
return -self._compute_chunked_score_samples(X)
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
# We get as many rows as possible within our working_memory budget
# (defined by sklearn.get_config()['working_memory']) to store
# self._max_features in each row during computation.
#
# Note:
# - this will get at least 1 row, even if 1 row of score will
# exceed working_memory.
# - this does only account for temporary memory usage while loading
# the data needed to compute the scores -- the returned scores
# themselves are 1D.
chunk_n_rows = get_chunk_n_rows(
row_bytes=16 * self._max_features, max_n_rows=n_samples
)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order="f")
for sl in slices:
# compute score on the slices of test samples:
scores[sl] = self._compute_score_samples(X[sl], subsample_features)
return scores
def _compute_score_samples(self, X, subsample_features):
"""
        Compute the score of each sample in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order="f")
for tree, features in zip(self.estimators_, self.estimators_features_):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)
n_samples_leaf = tree.tree_.n_node_samples[leaves_index]
depths += (
np.ravel(node_indicator.sum(axis=1))
+ _average_path_length(n_samples_leaf)
- 1.0
)
denominator = len(self.estimators_) * _average_path_length([self.max_samples_])
scores = 2 ** (
# For a single training sample, denominator and depth are 0.
# Therefore, we set the score manually to 1.
-np.divide(
depths, denominator, out=np.ones_like(depths), where=denominator != 0
)
)
return scores
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
}
}
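# Illustrative sketch (``clf`` and ``X`` are hypothetical placeholders): how
# the three scoring methods of the class above relate for a fitted forest.
#
#     >>> raw = clf.score_samples(X)       # opposite of the anomaly score
#     >>> dec = clf.decision_function(X)   # equals raw - clf.offset_
#     >>> pred = clf.predict(X)            # -1 where dec < 0, otherwise +1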
def _average_path_length(n_samples_leaf):
"""
    The average path length in an n_samples iTree, which is equal to
    the average path length of an unsuccessful BST search, since the
    latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples,)
The number of training samples in each test sample leaf, for
        each estimator.
Returns
-------
average_path_length : ndarray of shape (n_samples,)
"""
n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.0
average_path_length[mask_2] = 1.0
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
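# Illustrative sketch (not executed): the closed form above is the expected
# path length of an unsuccessful search in a binary search tree over n points,
# c(n) = 2 * (ln(n - 1) + euler_gamma) - 2 * (n - 1) / n, with c(1) = 0 and
# c(2) = 1 handled as special cases:
#
#     >>> _average_path_length([1, 2, 10])                 # doctest: +SKIP
#     array([0.        , 1.        , 3.74888048])
#
# ``_compute_score_samples`` then uses 2 ** (-depth / c(max_samples_)): leaves
# reached after unusually few splits score close to 1 (flagged as anomalies
# once ``score_samples`` flips the sign), while deep leaves score well below
# 0.5.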
| bsd-3-clause | ad69eea3864c5ab2a7735a91c268c62a | 35.095413 | 88 | 0.609953 | 4.201623 | false | false | false | false |
kjung/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 8 | 25509 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause | 00281808303150ca44f068886ec7e12c | 37.475113 | 78 | 0.636952 | 3.2339 | false | true | false | false |
kjung/scikit-learn | sklearn/metrics/cluster/bicluster.py | 352 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
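# Illustrative sketch (not executed): a worked example of the coefficient
# above. Bicluster a covers rows {0, 1} x columns {0, 1} (4 cells), bicluster
# b covers rows {1, 2} x columns {0, 1} (4 cells), and they share 2 cells:
#
#     >>> import numpy as np
#     >>> a_rows, a_cols = np.array([1, 1, 0]), np.array([1, 1, 0])
#     >>> b_rows, b_cols = np.array([0, 1, 1]), np.array([1, 1, 0])
#     >>> _jaccard(a_rows, a_cols, b_rows, b_cols)  # 2 / (4 + 4 - 2)
#     0.3333333333333333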
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
    * Hochreiter, Bodenhofer, et al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
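# Usage sketch (hypothetical toy data, not part of the original module):
#
#   import numpy as np
#   rows = np.array([[True, False], [False, True]])
#   cols = np.array([[True, False], [False, True]])
#   consensus_score((rows, cols), (rows, cols))  # identical sets -> 1.0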
| bsd-3-clause | 1f3dac8cdbfc142531678c76f5d5a411 | 31.523256 | 73 | 0.618162 | 3.419315 | false | false | false | false |
kjung/scikit-learn | sklearn/tree/tree.py | 1 | 122456 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil, floor
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._criterion import Entropy
from ._criterion import Gini
from ._criterion import PowersCriterion
from ._criterion import VarianceCriterion
from ._splitter import Splitter
from ._splitter import PropensitySplitter
from ._splitter import PowersSplitter
from ._splitter import VarianceSplitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import PropensityTreeBuilder
from ._tree import DoubleSampleTreeBuilder
from ._tree import PowersTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
"PropensityTree",
"DoubleSampleTree",
"PowersTree"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter,
"propensity":_splitter.PropensitySplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
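        # Illustrative example (not in the original source): with
        # n_samples=250, min_samples_leaf=0.01 and the default
        # min_samples_split=2, the lines above resolve min_samples_leaf to
        # ceil(0.01 * 250) == 3 and then raise min_samples_split to
        # max(2, 2 * 3) == 6.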
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
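        # Illustrative example (not in the original source): with
        # n_features_ == 100, "sqrt" and "auto" (for classification) resolve
        # to 10 features, "log2" to 6, and a float of 0.25 to 25.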
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the samples go through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
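# Usage sketch for the property above (illustrative, not part of the original
# source; X_train and y_train are assumed to already exist):
#
#   clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
#   clf.feature_importances_   # length n_features_; sums to 1 when nonzero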
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
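    # Note (illustrative, not in the original source): the tree stores per-leaf
    # class counts, so a leaf holding 3 samples of class 0 and 1 sample of
    # class 1 yields predicted probabilities [0.75, 0.25] after the
    # normalization above.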
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class PropensityTree(BaseDecisionTree, ClassifierMixin):
"""A propensity tree classifier. This is a base model for Wager & Athey's
causal forests algorithm 2.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples of each treatment class required to
be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
max_depth=None,
min_samples_split=2,
min_samples_leaf=3,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(PropensityTree, self).__init__(
criterion="propensity",
splitter="propensity",
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
# NB - the fit method here overrides the fit method from the base class, BaseDecisionTree.
def fit(self, X, y, w, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a propensity tree from the training set (X, y, w).
This is algorithm 2 from Wager & Athey. We fit a tree that
uses the treatment variable w to determine splits, but predict
using the outcome y.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples]
Array of outcome indicators.
        w : array-like, shape = [n_samples]
Array of treatment indicators.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
is_classification = True
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
w = check_array(w, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
w = np.atleast_1d(w)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
if w.ndim == 1:
w = np.reshape(w, (-1, 1))
        # Hard-assign a single output because multi-output doesn't make sense in this context.
self.n_outputs_ = 1
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
w_original = np.copy(w)
w_encoded = np.zeros(w.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, w_encoded[:, k] = np.unique(w[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
w = w_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, w_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(w, "dtype", None) != DOUBLE or not w.flags.contiguous:
w = np.ascontiguousarray(w, dtype=DOUBLE)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of outcomees=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if len(w) != n_samples:
raise ValueError("Number of treatments=%d does not match "
"number of samples=%d" % (len(w), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
# criterion = PropensityCriterion(self.n_outputs_,
# self.n_classes_)
criterion = Gini(self.n_outputs_, self.n_classes_)
splitter = PropensitySplitter(criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use the propensity-specific version of DepthFirstTreeBuilder.
builder = PropensityTreeBuilder(splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
builder.build(self.tree_, X, w, sample_weight, X_idx_sorted)
# Now apply to training data and get the leaf node assignments; keep them in an instance var.
self.y = np.copy(y)
self.w = np.copy(w)
self.training_sample_nodes = self.tree_.apply(X)
        # Estimate the per-leaf treatment effect: average the outcome in each
        # node conditioned on w, then take the difference of the two means.
num_bins = np.max(self.training_sample_nodes) + 1
self.treated_y = np.zeros(num_bins)
self.control_y = np.zeros(num_bins)
self.treated_n = np.zeros(num_bins)
self.control_n = np.zeros(num_bins)
for i in range(0, len(self.training_sample_nodes)) :
node_idx = self.training_sample_nodes[i]
if (self.w[i] == 0) :
self.control_y[node_idx] += self.y[i]
self.control_n[node_idx] += 1
else :
self.treated_y[node_idx] += self.y[i]
self.treated_n[node_idx] += 1
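        # The loop below guards against division by zero: a leaf with no
        # control (or no treated) samples has its count set to -1, so the
        # corresponding mean evaluates to -0.0 instead of producing a NaN.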
for i in range(num_bins) :
if self.control_n[i] == 0 :
self.control_n[i] = -1
if self.treated_n[i] == 0 :
self.treated_n[i] = -1
self.treated_mean_y = self.treated_y / self.treated_n
self.control_mean_y = self.control_y / self.control_n
self.effect_estimates = self.treated_mean_y - self.control_mean_y
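        # Worked example (illustrative): a leaf that received treated outcomes
        # [3.0, 5.0] and control outcomes [1.0] gets treated_mean_y == 4.0,
        # control_mean_y == 1.0, and an effect estimate of 3.0 for every
        # sample that lands in that leaf at prediction time.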
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_effect(self, X) :
"""Predict the treatment effect for each sample in X.
Returns
-------
y : array of shape = [n_samples]
The estimated treatment effect for each sample in X.
"""
nodes = self.apply(X)
return np.take(self.effect_estimates, nodes)
def predict_outcomes(self, X) :
"""Predict the outcomes for treated and untreated for each sample in X;
return as 2 column array."""
nodes = self.apply(X)
tx_outcomes = np.take(self.treated_mean_y, nodes)
ctl_outcomes = np.take(self.control_mean_y, nodes)
return np.transpose(np.matrix([tx_outcomes, ctl_outcomes]))
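    # Usage sketch (synthetic data; every name below is illustrative and not
    # part of the original module):
    #
    #   rng = np.random.RandomState(0)
    #   X = rng.normal(size=(500, 5))
    #   w = rng.binomial(1, 0.5, size=500)                # treatment indicator
    #   y = X[:, 0] + w * (1.0 + X[:, 1]) + rng.normal(scale=0.1, size=500)
    #   tree = PropensityTree(min_samples_leaf=10, random_state=0)
    #   tree.fit(X, y, w)
    #   tau_hat = tree.predict_effect(X)     # per-sample effect estimates
    #   outcomes = tree.predict_outcomes(X)  # treated / control mean outcomes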
    # NB - the predict method here overrides the predict method from the base class, BaseDecisionTree.
    # It predicts the treatment class from the fitted propensity tree; use predict_effect for the
    # (precomputed) per-leaf treatment effects.
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
# NB - the apply method here overrides the apply method from the base class, BaseDecisionTree.
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DoubleSampleTree(BaseDecisionTree, ClassifierMixin):
"""A double sample tree. This is a base model for Wager & Athey's
causal forests algorithm 1 (double sample forests).
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples of each treatment class required to
be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
max_depth=None,
min_samples_split=2,
min_samples_leaf=3,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DoubleSampleTree, self).__init__(
criterion="variance",
splitter="variance",
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
# NB - the fit method here overrides the fit method from the base class, BaseDecisionTree.
def fit(self, X, y, w, sample_weight=None, check_input=True, X_idx_sorted=None):
"""Build a double sample tree from the training set (X, y, w).
This is algorithm 1 from Wager & Athey. We fit a tree using
half the data to find split points (using a variance maximizing
criterion) and use the rest of the data to estimate treatment
effects in the leaves.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples]
Array of outcome indicators.
        w : array-like, shape = [n_samples]
Array of treatment indicators.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
is_classification = True
# Determine output settings
n_samples, self.n_features_ = X.shape
# self.split_indices is used to partition samples into two sets -
# split_indices == 0 - used for estimation of treatment effects
# split_indices == 1 - used to find splits.
n_split = int(floor(n_samples / 2))
        self.split_indices = np.zeros(n_samples, dtype=np.int)
        # Draw the split assignment from the estimator's random_state rather
        # than the global np.random state so that fits are reproducible.
        split_rng = check_random_state(self.random_state)
        self.split_indices[split_rng.choice(n_samples, size=n_split, replace=False)] = 1
        # split_indices is integer-typed (np.int / int64), matching the
        # SIZE_t type cdef'ed in most pxd files.
        # Presorted indices are not supported by the double-sample builder,
        # so discard any user-provided X_idx_sorted.
        X_idx_sorted = None
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
w = check_array(w, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
y = np.atleast_1d(y)
w = np.atleast_1d(w)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
if w.ndim == 1:
w = np.reshape(w, (-1, 1))
        # We hard-assign a single output because multi-class doesn't make sense in this context.
self.n_outputs_ = 1
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
w_original = np.copy(w)
w_encoded = np.zeros(w.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, w_encoded[:, k] = np.unique(w[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
w = w_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, w_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(w, "dtype", None) != DOUBLE or not w.flags.contiguous:
w = np.ascontiguousarray(w, dtype=DOUBLE)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of outcomees=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if len(w) != n_samples:
raise ValueError("Number of treatments=%d does not match "
"number of samples=%d" % (len(w), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
# if len(sample_weight) != n_samples:
# raise ValueError("Number of weights=%d does not match "
# "number of samples=%d" %
# (len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = VarianceCriterion(self.n_outputs_,
self.n_classes_)
splitter = VarianceSplitter(criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use version of DepthFirstTreeBuilder...
builder = DoubleSampleTreeBuilder(splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
builder.build(self.tree_, X, y, w, self.split_indices, sample_weight, X_idx_sorted)
# Now apply to estimation data and get the leaf node assignments; keep them in an instance var.
# The problem is - what if some leaf nodes don't have any samples from which to estimate the
# treatment effect? And what about the requirement for a min number of samples in each leaf node?
# What do we do then!...
self.training_sample_nodes = self.tree_.apply(X)
# Calculate effect estimates on each.
# How do we do this? We want to average the outcome in each node, conditioned on w.
# num_bins is number of nodes in the tree (not just leaf nodes).
# We keep arrays treated_y, control_y, etc, that keep track of quantities in
# each node, but only for leaf nodes in practice.
num_bins = np.max(self.training_sample_nodes) + 1
self.treated_y = np.zeros(num_bins)
self.control_y = np.zeros(num_bins)
self.treated_n = np.zeros(num_bins)
self.control_n = np.zeros(num_bins)
# For each sample, if it wasn't used to find the splits (split_indices[i] == 0)
# then use it for effect estimation.
for i in range(0, len(self.training_sample_nodes)) :
if self.split_indices[i] == 0 :
node_idx = self.training_sample_nodes[i]
if (w[i] == 0) :
self.control_y[node_idx] += y[i]
self.control_n[node_idx] += 1
else :
self.treated_y[node_idx] += y[i]
self.treated_n[node_idx] += 1
        # Guard against division by zero: bins that received no estimation
        # samples keep a count of -1; in particular, all non-leaf nodes end
        # up with -1, since apply() only assigns samples to leaves.
for i in range(num_bins) :
if self.control_n[i] == 0 :
self.control_n[i] = -1
if self.treated_n[i] == 0 :
self.treated_n[i] = -1
# Calculate means in each leaf node, then subtract to get effect estimates.
# note - this does not use any guards against 0 or 1 probabilities...
self.treated_mean_y = self.treated_y / self.treated_n
self.control_mean_y = self.control_y / self.control_n
self.effect_estimates = self.treated_mean_y - self.control_mean_y
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_effect(self, X) :
"""Predict the treatment effect for each sample in X.
Returns
-------
y : array of shape = [n_samples]
The estimated treatment effect for each sample in X.
"""
nodes = self.apply(X)
return np.take(self.effect_estimates, nodes)
def predict_outcomes(self, X) :
"""Predict the outcomes for treated and untreated for each sample in X;
return as 2 column array."""
nodes = self.apply(X)
tx_outcomes = np.take(self.treated_mean_y, nodes)
ctl_outcomes = np.take(self.control_mean_y, nodes)
return np.transpose(np.matrix([tx_outcomes, ctl_outcomes]))
# NB - the predict method here overrides the predict method from the base class, BaseDecisionTree.
# Basically, we just use apply and get the (precomputed) tx effects for each of the leaf nodes.
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
# NB - the apply method here overrides the apply method from the base class, BaseDecisionTree.
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
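# Illustrative usage sketch for DoubleSampleTree. The synthetic randomized
# experiment below (covariates X, binary treatment w, outcome y, and a true
# effect of 2.0 whenever x0 > 0) is an assumption made up for the example;
# only the public API defined above (fit(X, y, w), predict_effect(X)) is
# exercised, and the parameter values are arbitrary.
def _demo_double_sample_tree(n_samples=2000, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(n_samples, 5))
    w = rng.randint(0, 2, size=n_samples)      # randomized binary treatment
    tau = 2.0 * (X[:, 0] > 0)                  # heterogeneous true effect
    y = X[:, 1] + tau * w + rng.normal(scale=0.1, size=n_samples)
    tree = DoubleSampleTree(min_samples_leaf=20, random_state=seed)
    tree.fit(X, y, w)
    effects = tree.predict_effect(X)
    # Estimates should be close to 2 where x0 > 0 and close to 0 elsewhere.
    return effects[X[:, 0] > 0].mean(), effects[X[:, 0] <= 0].mean()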
class PowersTree(BaseDecisionTree, ClassifierMixin):
"""A tree for estimating causal effects using Scott Power's split criterion.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples of each treatment class required to
be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
max_depth=None,
min_samples_split=2,
min_samples_leaf=3,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(PowersTree, self).__init__(
criterion="PowersCriterion",
splitter="PowersSplitter",
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
# NB - the fit method here overrides the fit method from the base class, BaseDecisionTree.
def fit(self, X, y, w, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a causal tree from the training set (X, y, w).
using Scott Powers' split criterion.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples]
Array of outcome indicators.
        w : array-like, shape = [n_samples]
Array of treatment indicators.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
is_classification = True
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
w = check_array(w, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
w = np.atleast_1d(w)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
if w.ndim == 1:
w = np.reshape(w, (-1, 1))
        # We hard-assign a single output because multi-class doesn't make sense in this context.
self.n_outputs_ = 1
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
w_original = np.copy(w)
w_encoded = np.zeros(w.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, w_encoded[:, k] = np.unique(w[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
w = w_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, w_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(w, "dtype", None) != DOUBLE or not w.flags.contiguous:
w = np.ascontiguousarray(w, dtype=DOUBLE)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of outcomees=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if len(w) != n_samples:
raise ValueError("Number of treatments=%d does not match "
"number of samples=%d" % (len(w), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = PowersCriterion()
if np.unique(y).shape[0] == 2 :
criterion.set_binary_outcome(1)
else :
criterion.set_binary_outcome(0)
splitter = PowersSplitter(criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use version of DepthFirstTreeBuilder...
builder = PowersTreeBuilder(splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
builder.build(self.tree_, X, y, w, sample_weight, X_idx_sorted)
# Now apply to training data and get the leaf node assignments; keep them in an instance var.
self.y = np.copy(y)
self.w = np.copy(w)
self.training_sample_nodes = self.tree_.apply(X)
        # Calculate effect estimates for each node: average the outcome in
        # each node conditioned on w, then subtract the control mean from
        # the treated mean.
num_bins = np.max(self.training_sample_nodes) + 1
self.treated_y = np.zeros(num_bins)
self.control_y = np.zeros(num_bins)
self.treated_n = np.zeros(num_bins)
self.control_n = np.zeros(num_bins)
for i in range(0, len(self.training_sample_nodes)) :
node_idx = self.training_sample_nodes[i]
if (self.w[i] == 0) :
self.control_y[node_idx] += self.y[i]
self.control_n[node_idx] += 1
else :
self.treated_y[node_idx] += self.y[i]
self.treated_n[node_idx] += 1
        # Guard against division by zero; the per-treatment min_samples_leaf
        # constraint should mean these -1 sentinels are never actually hit.
for i in range(num_bins) :
if self.control_n[i] == 0 :
self.control_n[i] = -1
if self.treated_n[i] == 0 :
self.treated_n[i] = -1
self.treated_mean_y = self.treated_y / self.treated_n
self.control_mean_y = self.control_y / self.control_n
self.effect_estimates = self.treated_mean_y - self.control_mean_y
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_effect(self, X) :
"""Predict the treatment effect for each sample in X.
Returns
-------
y : array of shape = [n_samples]
The estimated treatment effect for each sample in X.
"""
nodes = self.apply(X)
return np.take(self.effect_estimates, nodes)
def predict_outcomes(self, X) :
"""Predict the outcomes for treated and untreated for each sample in X;
return as 2 column array."""
nodes = self.apply(X)
tx_outcomes = np.take(self.treated_mean_y, nodes)
ctl_outcomes = np.take(self.control_mean_y, nodes)
return np.transpose(np.matrix([tx_outcomes, ctl_outcomes]))
# NB - the predict method here overrides the predict method from the base class, BaseDecisionTree.
# Basically, we just use apply and get the (precomputed) tx effects for each of the leaf nodes.
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
# NB - the apply method here overrides the apply method from the base class, BaseDecisionTree.
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
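# Illustrative usage sketch for PowersTree, mirroring the one given after
# DoubleSampleTree. The binary outcome below (treatment raises the success
# probability from 0.3 to 0.7 when x0 > 0) is made up for the example so
# that the binary-outcome branch of PowersCriterion is exercised; the
# parameter values are arbitrary.
def _demo_powers_tree(n_samples=2000, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(n_samples, 5))
    w = rng.randint(0, 2, size=n_samples)
    p = 0.3 + 0.4 * w * (X[:, 0] > 0)
    y = rng.binomial(1, p)
    tree = PowersTree(min_samples_leaf=25, random_state=seed)
    tree.fit(X, y, w)
    # Estimated effects should be near 0.4 where x0 > 0 and near 0 elsewhere.
    return tree.predict_effect(X)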
| bsd-3-clause | 28740cd88cbaecaefc6533eb2f248691 | 40.023786 | 112 | 0.568874 | 4.214918 | false | false | false | false |
kjung/scikit-learn | sklearn/manifold/spectral_embedding_.py | 3 | 20837 | """Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
        An array of bool values indicating the indexes of the nodes
        belonging to the connected component of the given query
node
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=np.bool)
nodes_to_explore = np.zeros(n_node, dtype=np.bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
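# Small self-check of the two helpers above: a block-diagonal adjacency
# matrix with two components is reported as disconnected, and the component
# containing node 0 has exactly two members. The toy graph is an arbitrary
# illustration.
def _demo_graph_connectivity():
    graph = np.array([[0, 1, 0, 0],
                      [1, 0, 0, 0],
                      [0, 0, 0, 1],
                      [0, 0, 1, 0]])
    assert not _graph_is_connected(graph)
    assert _graph_connected_component(graph, 0).sum() == 2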
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
norm_laplacian : bool
Whether the value of the diagonal should be changed or not
Returns
-------
laplacian : array or sparse matrix
        An array or sparse matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
    # We need to set all entries of the diagonal to the given value
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : integer, optional, default 8
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
eigen_tol : float, optional, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
norm_laplacian : bool, optional, default=True
If True, then compute normalized Laplacian.
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* http://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_laplacian(adjacency,
normed=norm_laplacian, return_diag=True)
if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = random_state.uniform(-1, 1, laplacian.shape[0])
lambdas, diffusion_map = eigsh(laplacian, k=n_components,
sigma=1.0, which='LM',
tol=eigen_tol, v0=v0)
embedding = diffusion_map.T[n_components::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
if eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
lambdas, diffusion_map = eigh(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
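# Minimal usage sketch for spectral_embedding: build an rbf affinity over two
# well separated blobs and embed it. The blob layout and gamma value are
# arbitrary choices for the example; with such data the first embedding
# coordinate is expected to separate the two blobs.
def _demo_spectral_embedding():
    rng = np.random.RandomState(0)
    X = np.concatenate([rng.normal(0.0, 0.1, size=(20, 2)),
                        rng.normal(5.0, 0.1, size=(20, 2))])
    affinity = rbf_kernel(X, gamma=1.0)
    return spectral_embedding(affinity, n_components=2, random_state=0)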
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
-----------
n_components : integer, default: 2
The dimension of the projected subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
affinity : string or callable, default : "nearest_neighbors"
How to construct the affinity matrix.
- 'nearest_neighbors' : construct affinity matrix by knn graph
- 'rbf' : construct affinity matrix by rbf kernel
- 'precomputed' : interpret X as precomputed affinity matrix
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, optional, default : 1/n_features
Kernel coefficient for rbf kernel.
n_neighbors : int, default : max(n_samples/10 , 1)
Number of nearest neighbors for nearest_neighbors graph building.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array, shape = (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : array, shape = (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2011
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(self, n_components=2, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None, n_jobs=1):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
include_self=True,
n_jobs=self.n_jobs)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
self : object
Returns the instance itself.
"""
X = check_array(X, ensure_min_samples=2, estimator=self)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if self.affinity not in set(("nearest_neighbors", "rbf",
"precomputed")):
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
        X_new : array-like, shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
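# Minimal estimator usage sketch: embed 60 points sampled on a circle with
# the nearest-neighbors affinity. The data and parameter values are
# arbitrary choices for the example; the k-NN graph of adjacent points on
# the circle is connected, so no connectivity warning is expected.
def _demo_spectral_embedding_estimator():
    t = np.linspace(0, 2 * np.pi, 60, endpoint=False)
    X = np.c_[np.cos(t), np.sin(t)]
    embedder = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                                 n_neighbors=4, random_state=0)
    return embedder.fit_transform(X)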
| bsd-3-clause | 1928c5bc3abe2dd20ac38425d6de0348 | 39.776908 | 79 | 0.613428 | 4.270752 | false | false | false | false |
kjung/scikit-learn | sklearn/preprocessing/imputation.py | 28 | 14119 | # Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.fixes import astype
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Tie: copy the behaviour of scipy.stats.mode and return the smaller value
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
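# Illustrative check of the tie-breaking behaviour of _most_frequent (a small
# self-contained sketch; the values are arbitrary). With array [2, 2, 3] and
# extra_value=0: one extra zero leaves 2 as the mode, three extra zeros make 0
# win, and an exact tie resolves to the smaller value, as scipy.stats.mode does.
def _most_frequent_example():
    example = np.array([2, 2, 3])
    assert _most_frequent(example, extra_value=0, n_repeat=1) == 2
    assert _most_frequent(example, extra_value=0, n_repeat=3) == 0
    assert _most_frequent(example, extra_value=0, n_repeat=2) == 0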
class Imputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <imputation>`.
Parameters
----------
missing_values : integer or "NaN", optional (default="NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as np.nan,
use the string value "NaN".
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
the axis.
- If "median", then replace missing values using the median along
the axis.
- If "most_frequent", then replace missing using the most frequent
value along the axis.
axis : integer, optional (default=0)
The axis along which to impute.
- If `axis=0`, then impute along columns.
- If `axis=1`, then impute along rows.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is sparse and `missing_values=0`;
- If `axis=0` and X is encoded as a CSR matrix;
- If `axis=1` and X is encoded as a CSC matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature if axis == 0.
Notes
-----
- When ``axis=0``, columns which only contained missing values at `fit`
are discarded upon `transform`.
- When ``axis=1``, an exception is raised if there are rows for which it is
not possible to fill in the missing values (e.g., because they only
contain missing values).
"""
def __init__(self, missing_values="NaN", strategy="mean",
axis=0, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.axis = axis
self.verbose = verbose
self.copy = copy
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check parameters
allowed_strategies = ["mean", "median", "most_frequent"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.axis not in [0, 1]:
raise ValueError("Can only impute missing values on axis 0 and 1, "
" got axis={0}".format(self.axis))
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data will be computed in transform()
# when the imputation is done per sample (i.e., when axis=1).
if self.axis == 0:
X = check_array(X, accept_sparse='csc', dtype=np.float64,
force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
return self
def _sparse_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on sparse data."""
# Imputation is done "by column", so if we want to do it
# by row we only need to convert the matrix to csr format.
if axis == 1:
X = X.tocsr()
else:
X = X.tocsc()
# Count the zeros
if missing_values == 0:
n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
else:
n_zeros_axis = X.shape[axis] - np.diff(X.indptr)
# Mean
if strategy == "mean":
if missing_values != 0:
n_non_missing = n_zeros_axis
# Mask the missing elements
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
# Sum only the valid elements
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr),
copy=False)
sums = X.sum(axis=0)
# Count the elements != 0
mask_non_zeros = sparse.csc_matrix(
(mask_valids.astype(np.float64),
X.indices,
X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
# Ignore the error, columns with a np.nan statistics_
# are not an error at this point. These columns will
# be removed in transform
with np.errstate(all="ignore"):
return np.ravel(sums) / np.ravel(n_non_missing)
# Median + Most frequent
else:
# Remove the missing values, for each column
columns_all = np.hsplit(X.data, X.indptr[1:-1])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values),
X.indptr[1:-1])
# astype necessary for bug in numpy.hsplit before v1.9
columns = [col[astype(mask, bool, copy=False)]
for col, mask in zip(columns_all, mask_valids)]
# Median
if strategy == "median":
median = np.empty(len(columns))
for i, column in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
# Most frequent
elif strategy == "most_frequent":
most_frequent = np.empty(len(columns))
for i, column in enumerate(columns):
most_frequent[i] = _most_frequent(column,
0,
n_zeros_axis[i])
return most_frequent
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
# In old versions of numpy, calling a median on an array
                # containing nans returns nan. This is different in
# recent versions of numpy, which we want to mimic
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
# is equal to the frequency of the most frequent valid element
# See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.
"""
if self.axis == 0:
check_is_fitted(self, 'statistics_')
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data need to be recomputed
# when the imputation is done per sample
if self.axis == 1:
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
force_all_finite=False, copy=self.copy)
if sparse.issparse(X):
statistics = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
statistics = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
X = check_array(X, accept_sparse='csc', dtype=FLOAT_DTYPES,
force_all_finite=False, copy=self.copy)
statistics = self.statistics_
# Delete the invalid rows/columns
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[not self.axis])[invalid_mask]
if self.axis == 0 and invalid_mask.any():
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
elif self.axis == 1 and invalid_mask.any():
raise ValueError("Some rows only contain "
"missing values: %s" % missing)
# Do actual imputation
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = astype(valid_statistics[indexes], X.dtype,
copy=False)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if self.axis == 0:
coordinates = np.where(mask.transpose())[::-1]
else:
coordinates = mask
X[coordinates] = values
return X
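# Illustrative usage sketch of the Imputer above (column-wise mean imputation).
# The arrays are arbitrary demonstration values: statistics_ learned in fit()
# are the per-column means ignoring NaN and are reused to fill transform().
def _imputer_usage_example():
    imp = Imputer(missing_values="NaN", strategy="mean", axis=0)
    imp.fit(np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]]))
    # statistics_ is roughly [4.0, 3.667]; the NaNs below are replaced by it.
    return imp.transform(np.array([[np.nan, 2.0], [6.0, np.nan]]))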
| bsd-3-clause | f3c4011d6f6f65c87e5bb9e0290d4fe5 | 36.650667 | 79 | 0.531907 | 4.275893 | false | false | false | false |
kjung/scikit-learn | sklearn/multiclass.py | 13 | 27844 | """
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted
from .utils.multiclass import (_check_partial_fit_first_call,
check_classification_targets)
from .externals.joblib import Parallel
from .externals.joblib import delayed
from .externals.six.moves import zip as izip
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
"""Fit a single binary estimator."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn("Label %s is present in all training examples." %
str(classes[c]))
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y)
return estimator
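# Illustrative sketch of the degenerate branch above: when a binary column
# contains a single class, the base estimator is bypassed and a constant
# predictor is returned (the estimator and labels are arbitrary examples).
def _fit_binary_single_class_example():
    from sklearn.svm import LinearSVC
    X = np.array([[0.0], [1.0]])
    y = np.array([1, 1])  # only one class present
    # a warning names the always-present label ("spam" here)
    est = _fit_binary(LinearSVC(), X, y, classes=["not spam", "spam"])
    return est.predict(X)  # the constant class is repeated: [1, 1]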
def _partial_fit_binary(estimator, X, y):
"""Partially fit a single binary estimator."""
estimator.partial_fit(X, y, np.array((0, 1)))
return estimator
def _predict_binary(estimator, X):
"""Make predictions using a single binary estimator."""
if is_regressor(estimator):
return estimator.predict(X)
try:
score = np.ravel(estimator.decision_function(X))
except (AttributeError, NotImplementedError):
# probabilities of the positive class
score = estimator.predict_proba(X)[:, 1]
return score
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
class _ConstantPredictor(BaseEstimator):
def fit(self, X, y):
self.y_ = y
return self
def predict(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def decision_function(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def predict_proba(self, X):
check_is_fitted(self, 'y_')
return np.repeat([np.hstack([1 - self.y_, self.y_])],
X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-the-rest (OvR) multiclass/multilabel strategy
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
    interpretability. Since each class is represented by one and only one
    classifier, it is possible to gain knowledge about the class by inspecting
    its corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
This strategy can also be used for multilabel learning, where a classifier
is used to predict multiple labels for instance, by fitting on a 2-d matrix
in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
In the multilabel learning literature, OvR is also known as the binary
relevance method.
Read more in the :ref:`User Guide <ovr_classification>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes` estimators
Estimators used for predictions.
classes_ : array, shape = [`n_classes`]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
        # outperform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
self.classes_ = self.label_binarizer_.classes_
columns = (col.toarray().ravel() for col in Y.T)
# In cases where individual estimators are very fast to train setting
        # n_jobs > 1 can result in slower performance due to the overhead
# of spawning threads. See joblib issue #112.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
self.estimator, X, column, classes=[
"not %s" % self.label_binarizer_.classes_[i],
self.label_binarizer_.classes_[i]])
for i, column in enumerate(columns))
return self
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators
        Should be used when memory is insufficient to train all the data at
        once. Chunks of data can be passed over several iterations.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self
"""
if _check_partial_fit_first_call(self, classes):
            if not hasattr(self.estimator, "partial_fit"):
                raise ValueError(("Base estimator {0} doesn't have a "
                                  "partial_fit method").format(self.estimator))
            self.estimators_ = [clone(self.estimator)
                                for _ in range(self.n_classes_)]
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
# outperform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(
_partial_fit_binary)(self.estimators_[i],
X, next(columns) if self.classes_[i] in
self.label_binarizer_.classes_ else
np.zeros((1, len(y))))
for i in range(self.n_classes_))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples, ], [n_samples, n_classes].
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
if (hasattr(self.estimators_[0], "decision_function") and
is_classifier(self.estimators_[0])):
thresh = 0
else:
thresh = .5
n_samples = _num_samples(X)
if self.label_binarizer_.y_type_ == "multiclass":
maxima = np.empty(n_samples, dtype=float)
maxima.fill(-np.inf)
argmaxima = np.zeros(n_samples, dtype=int)
for i, e in enumerate(self.estimators_):
pred = _predict_binary(e, X)
np.maximum(maxima, pred, out=maxima)
argmaxima[maxima == pred] = i
return self.classes_[np.array(argmaxima.T)]
else:
indices = array.array('i')
indptr = array.array('i', [0])
for e in self.estimators_:
indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
indicator = sp.csc_matrix((data, indices, indptr),
shape=(n_samples, len(self.estimators_)))
return self.label_binarizer_.inverse_transform(indicator)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : (sparse) array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self, 'estimators_')
# Y[i, j] gives the probability that sample i has the label j.
# In the multi-label case, these are not disjoint.
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if len(self.estimators_) == 1:
# Only one estimator, but we still want to return probabilities
# for two classes.
Y = np.concatenate(((1 - Y), Y), axis=1)
if not self.multilabel_:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
def decision_function(self, X):
"""Returns the distance of each sample from the decision boundary for
each class. This can only be used with estimators which implement the
decision_function method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "decision_function"):
raise AttributeError(
"Base estimator doesn't have a decision_function attribute.")
return np.array([est.decision_function(X).ravel()
for est in self.estimators_]).T
@property
def multilabel_(self):
"""Whether this is a multilabel classifier"""
return self.label_binarizer_.y_type_.startswith('multilabel')
@property
def n_classes_(self):
return len(self.classes_)
@property
def coef_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "coef_"):
raise AttributeError(
"Base estimator doesn't have a coef_ attribute.")
coefs = [e.coef_ for e in self.estimators_]
if sp.issparse(coefs[0]):
return sp.vstack(coefs)
return np.vstack(coefs)
@property
def intercept_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "intercept_"):
raise AttributeError(
"Base estimator doesn't have an intercept_ attribute.")
return np.array([e.intercept_.ravel() for e in self.estimators_])
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
def _partial_fit_ovo_binary(estimator, X, y, i, j):
"""Partially fit a single binary estimator(one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.zeros_like(y)
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
return _partial_fit_binary(estimator, X[cond], y_binary)
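# Illustrative sketch of how a single one-vs-one sub-problem is carved out of a
# three-class dataset by _fit_ovo_binary (the estimator and data are arbitrary):
# only samples of classes 0 and 2 are kept, relabelled as 0 and 1 respectively.
def _ovo_binary_example():
    from sklearn.svm import LinearSVC
    X = np.array([[0.0], [0.1], [1.0], [1.1], [2.0], [2.1]])
    y = np.array([0, 0, 1, 1, 2, 2])
    est = _fit_ovo_binary(LinearSVC(random_state=0), X, y, i=0, j=2)
    return est.predict(np.array([[0.05], [2.05]]))  # expected [0, 1] here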
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-one multiclass strategy
This strategy consists in fitting one classifier per class pair.
At prediction time, the class which received the most votes is selected.
Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers,
this method is usually slower than one-vs-the-rest, due to its
O(n_classes^2) complexity. However, this method may be advantageous for
algorithms such as kernel algorithms which don't scale well with
`n_samples`. This is because each individual learning problem only involves
a small subset of the data whereas, with one-vs-the-rest, the complete
dataset is used `n_classes` times.
Read more in the :ref:`User Guide <ovo_classification>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
check_consistent_length(X, y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y, self.classes_[i], self.classes_[j])
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
def partial_fit(self, X, y, classes=None):
"""Partially fit underlying estimators
        Should be used when memory is insufficient to train all the data. Chunks
        of data can be passed over several iterations, where the first call
should have an array of all target variables.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
classes : array, shape (n_classes, )
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is only required in the first call of partial_fit
and can be omitted in the subsequent calls.
Returns
-------
self
"""
if _check_partial_fit_first_call(self, classes):
self.estimators_ = [clone(self.estimator) for i in
range(self.n_classes_ *
(self.n_classes_-1) // 2)]
y = np.asarray(y)
check_consistent_length(X, y)
check_classification_targets(y)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_partial_fit_ovo_binary)(
estimator, X, y, self.classes_[i], self.classes_[j])
            for estimator, (i, j) in izip(
                self.estimators_,
                ((i, j) for i in range(self.n_classes_)
                 for j in range(i + 1, self.n_classes_))))
return self
def predict(self, X):
"""Estimate the best class label for each sample in X.
This is implemented as ``argmax(decision_function(X), axis=1)`` which
will return the label of the class with most votes by estimators
predicting the outcome of a decision for each possible class pair.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
Y = self.decision_function(X)
return self.classes_[Y.argmax(axis=1)]
def decision_function(self, X):
"""Decision function for the OneVsOneClassifier.
The decision values for the samples are computed by adding the
normalized sum of pair-wise classification confidence levels to the
votes in order to disambiguate between the decision values when the
votes for all the classes are equal leading to a tie.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
Y : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
predictions = np.vstack([est.predict(X) for est in self.estimators_]).T
confidences = np.vstack([_predict_binary(est, X) for est in self.estimators_]).T
return _ovr_decision_function(predictions, confidences,
len(self.classes_))
@property
def n_classes_(self):
return len(self.classes_)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
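# Worked illustration of the tie-breaking above for a single sample and three
# classes (the pairwise classifiers are ordered (0, 1), (0, 2), (1, 2); the
# numbers are arbitrary). The scaled confidences stay within (-0.5, 0.5), so
# they never overturn a difference of one full vote.
def _ovr_decision_function_example():
    # class 1 beats 0, class 0 beats 2, class 1 beats 2 -> votes [1, 2, 0]
    predictions = np.array([[1, 0, 0]])
    confidences = np.array([[0.8, 0.3, 0.6]])
    return _ovr_decision_function(predictions, confidences, n_classes=3)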
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""(Error-Correcting) Output-Code multiclass strategy
Output-code based strategies consist in representing each class with a
binary code (an array of 0s and 1s). At fitting time, one binary
classifier per bit in the code book is fitted. At prediction time, the
classifiers are used to project new points in the class space and the class
closest to the points is chosen. The main advantage of these strategies is
that the number of classifiers used can be controlled by the user, either
for compressing the model (0 < code_size < 1) or for making the model more
robust to errors (code_size > 1). See the documentation for more details.
Read more in the :ref:`User Guide <ecoc>`.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
code_size : float
Percentage of the number of classes to be used to create the code book.
A number between 0 and 1 will require fewer classifiers than
one-vs-the-rest. A number greater than 1 will require more classifiers
than one-vs-the-rest.
random_state : numpy.RandomState, optional
The generator used to initialize the codebook. Defaults to
numpy.random.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
code_book_ : numpy array of shape [n_classes, code_size]
Binary array containing the code of each class.
References
----------
.. [1] "Solving multiclass learning problems via error-correcting output
codes",
Dietterich T., Bakiri G.,
Journal of Artificial Intelligence Research 2,
1995.
.. [2] "The error coding method and PICTs",
James G., Hastie T.,
Journal of Computational and Graphical statistics 7,
1998.
.. [3] "The Elements of Statistical Learning",
Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
2008.
"""
def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
self.estimator = estimator
self.code_size = code_size
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : numpy array of shape [n_samples]
Multi-class targets.
Returns
-------
self
"""
if self.code_size <= 0:
            raise ValueError("code_size should be greater than 0, got {0}"
                             "".format(self.code_size))
_check_estimator(self.estimator)
random_state = check_random_state(self.random_state)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
code_size_ = int(n_classes * self.code_size)
# FIXME: there are more elaborate methods than generating the codebook
# randomly.
self.code_book_ = random_state.random_sample((n_classes, code_size_))
self.code_book_[self.code_book_ > 0.5] = 1
if hasattr(self.estimator, "decision_function"):
self.code_book_[self.code_book_ != 1] = -1
else:
self.code_book_[self.code_book_ != 1] = 0
classes_index = dict((c, i) for i, c in enumerate(self.classes_))
Y = np.array([self.code_book_[classes_index[y[i]]]
for i in range(X.shape[0])], dtype=np.int)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_binary)(self.estimator, X, Y[:, i])
for i in range(Y.shape[1]))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
return self.classes_[pred]
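# Illustrative usage sketch of the three meta-estimators above on a toy
# three-class problem (the base estimator and data are arbitrary). They share
# the same fit/predict surface; only the number of underlying binary problems
# differs: n_classes for OvR, n_classes * (n_classes - 1) / 2 for OvO and
# int(n_classes * code_size) for the output-code strategy.
def _multiclass_usage_example():
    from sklearn.svm import LinearSVC
    X = np.array([[0.0], [0.2], [1.0], [1.2], [2.0], [2.2]])
    y = np.array([0, 0, 1, 1, 2, 2])
    ovr = OneVsRestClassifier(LinearSVC(random_state=0)).fit(X, y)
    ovo = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X, y)
    ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2,
                                random_state=0).fit(X, y)
    # 3, 3 and 6 underlying estimators respectively for this 3-class problem
    return (len(ovr.estimators_), len(ovo.estimators_), len(ecoc.estimators_),
            ovr.predict(np.array([[2.1]])))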
| bsd-3-clause | bf45709c2d3504e0253ad24d56dd9144 | 35.733509 | 88 | 0.612592 | 4.066005 | false | false | false | false |
kjung/scikit-learn | sklearn/metrics/ranking.py | 4 | 27716 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
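# Worked illustration of the helper above on a four-sample toy problem (the
# scores are arbitrary): counts are cumulative over decreasing thresholds.
def _binary_clf_curve_example():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    # thresholds: [0.8, 0.4, 0.35, 0.1]
    # tps:        [1,   1,   2,    2]   positives scored >= each threshold
    # fps:        [0,   1,   1,    2]   negatives scored >= each threshold
    return fps, tps, thresholds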
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by giving the maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
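# Worked illustration of coverage_error on two samples (arbitrary values): for
# each sample, count how far down the ranked scores one has to go to cover all
# true labels, then average over samples.
def _coverage_error_example():
    y_true = np.array([[1, 0, 0], [0, 1, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # sample 0: its only true label has the 2nd highest score -> coverage 2
    # sample 1: the lowest-scored true label is ranked 3rd -> coverage 3
    return coverage_error(y_true, y_score)  # (2 + 3) / 2 = 2.5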
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there is no positive or no negative labels, those values should
    # be considered as correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
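# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A tiny multilabel example, assuming the two functions defined above behave like
# the public sklearn.metrics.coverage_error / label_ranking_loss.
if __name__ == "__main__":
    import numpy as np
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    # how far down the ranking we must go, on average, to cover all true labels
    print(coverage_error(y_true, y_score))      # 2.5
    # fraction of incorrectly ordered (relevant, irrelevant) label pairs
    print(label_ranking_loss(y_true, y_score))  # 0.75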
| bsd-3-clause | f91eb00eff6b220d8d2ff294ce550e12 | 35.372703 | 79 | 0.626173 | 3.854798 | false | false | false | false |
kjung/scikit-learn | examples/model_selection/plot_precision_recall.py | 72 | 6377 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
lw = 2
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average ROC curve and ROC area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], lw=lw, color='navy',
label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"], color='gold', lw=lw,
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
plt.plot(recall[i], precision[i], color=color, lw=lw,
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
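# --- Hedged numeric check (added for illustration; the counts are made up) ---
# The definitions in the docstring reduce to simple arithmetic. With, say,
# 8 true positives, 2 false positives and 4 false negatives:
tp, fp, fn = 8.0, 2.0, 4.0
p = tp / (tp + fp)        # precision  = 0.8
r = tp / (tp + fn)        # recall    ~= 0.667
f1 = 2 * p * r / (p + r)  # harmonic mean of P and R ~= 0.727
print(p, r, f1)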
| bsd-3-clause | 4acc79a4effb871c7ed693a61a950f7c | 39.360759 | 80 | 0.713502 | 3.690394 | false | true | false | false |
kjung/scikit-learn | sklearn/decomposition/fastica_.py | 53 | 18240 | """
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape(n)
Array to be orthogonalized
W : ndarray of shape(p, n)
Null space definition
j : int < p
        The number of (first) rows of the null space W with respect to which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
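# --- Illustrative check (added; not part of the original module) ---
# Symmetric decorrelation maps W to (W W.T)^{-1/2} W, so the result has
# orthonormal rows. A doctest-style sketch:
#
#   >>> rng = np.random.RandomState(0)
#   >>> W = _sym_decorrelation(rng.randn(3, 3))
#   >>> np.allclose(np.dot(W, W.T), np.eye(3))
#   True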
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(W, X), fun_args)
W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
        # builtin max, abs are faster than numpy counterparts.
lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing '
'tolerance or the maximum number of iterations.')
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True,
return_n_iter=False):
"""Perform Fast Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, optional
Apply a parallel or deflational FASTICA algorithm.
whiten : boolean, optional
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, optional
Maximum number of iterations to perform.
tol: float, optional
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : (n_components, n_components) array, optional
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_X_mean : bool, optional
If True, X_mean is returned too.
compute_sources : bool, optional
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
K : array, shape (n_components, n_features) | None.
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : array, shape (n_components, n_components)
Estimated un-mixing matrix.
The mixing matrix can be obtained by::
w = np.dot(W, K.T)
A = w.T * (w * w.T).I
S : array, shape (n_samples, n_components) | None
Estimated source matrix
X_mean : array, shape (n_features, )
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to 'un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
random_state = check_random_state(random_state)
fun_args = {} if fun_args is None else fun_args
# make interface compatible with other decompositions
# a copy is required only for non whitened data
X = check_array(X, copy=whiten, dtype=FLOAT_DTYPES).T
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if fun == 'logcosh':
g = _logcosh
elif fun == 'exp':
g = _exp
elif fun == 'cube':
g = _cube
elif callable(fun):
def g(x, fun_args):
return fun(x, **fun_args)
else:
exc = ValueError if isinstance(fun, six.string_types) else TypeError
raise exc("Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% fun)
n, p = X.shape
if not whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n, p)
if (n_components > min(n, p)):
n_components = min(n, p)
print("n_components is too large: it will be set to %s" % n_components)
if whiten:
# Centering the columns (ie the variables)
X_mean = X.mean(axis=-1)
X -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(p)
else:
        # X must be cast to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(X, copy=False) # copy has been taken care of
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components,
n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': tol,
'g': g,
'fun_args': fun_args,
'max_iter': max_iter,
'w_init': w_init}
if algorithm == 'parallel':
W, n_iter = _ica_par(X1, **kwargs)
elif algorithm == 'deflation':
W, n_iter = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if whiten:
if compute_sources:
S = fast_dot(fast_dot(W, K), X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return K, W, S, X_mean, n_iter
else:
return K, W, S, X_mean
else:
if return_n_iter:
return K, W, S, n_iter
else:
return K, W, S
else:
if compute_sources:
S = fast_dot(W, X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return None, W, S, None, n_iter
else:
return None, W, S, None
else:
if return_n_iter:
return None, W, S, n_iter
else:
return None, W, S
class FastICA(BaseEstimator, TransformerMixin):
"""FastICA: a fast algorithm for Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, optional
Number of components to use. If none is passed, all are used.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA.
whiten : boolean, optional
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, optional
Maximum number of iterations during fit.
tol : float, optional
Tolerance on update at each iteration.
    w_init : None or an (n_components, n_components) ndarray
The mixing matrix to be used to initialize the algorithm.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : 2D array, shape (n_components, n_features)
The unmixing matrix.
mixing_ : array, shape (n_features, n_components)
The mixing matrix.
n_iter_ : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge.
Notes
-----
Implementation based on
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
def __init__(self, n_components=None, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super(FastICA, self).__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
            If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
fun_args = {} if self.fun_args is None else self.fun_args
whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
X=X, n_components=self.n_components, algorithm=self.algorithm,
whiten=self.whiten, fun=self.fun, fun_args=fun_args,
max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
random_state=self.random_state, return_X_mean=True,
compute_sources=compute_sources, return_n_iter=True)
if self.whiten:
self.components_ = np.dot(unmixing, whitening)
self.mean_ = X_mean
self.whitening_ = whitening
else:
self.components_ = unmixing
self.mixing_ = linalg.pinv(self.components_)
if compute_sources:
self.__sources = sources
return sources
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self
"""
self._fit(X, compute_sources=False)
return self
def transform(self, X, y=None, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
if self.whiten:
X -= self.mean_
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
X = fast_dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
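# --- Hedged usage sketch (appended for illustration; not part of the module) ---
# Unmixing two toy sources with the class defined above. The mixing matrix and
# the signals below are made up for the demo.
if __name__ == "__main__":
    t = np.linspace(0, 8, 2000)
    S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]  # two independent sources
    A = np.array([[1.0, 0.5], [0.5, 2.0]])            # mixing matrix
    X_obs = np.dot(S, A.T)                            # observed mixtures
    ica = FastICA(n_components=2, random_state=0)
    S_est = ica.fit_transform(X_obs)                  # estimated sources
    print(S_est.shape, ica.mixing_.shape)             # (2000, 2) (2, 2)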
| bsd-3-clause | d038a6f1bc5ccf190520f240aee41a2f | 30.888112 | 79 | 0.584485 | 3.734644 | false | false | false | false |
kjung/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 156 | 2088 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
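# --- Hedged illustration (added): the Vandermonde-style design matrix ---
# PolynomialFeatures builds exactly the [1, x, x**2, ...] columns described in
# the docstring; a degree-3 expansion of three sample points:
print(PolynomialFeatures(degree=3).fit_transform(np.array([[1.0], [2.0], [3.0]])))
# [[  1.   1.   1.   1.]
#  [  1.   2.   4.   8.]
#  [  1.   3.   9.  27.]]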
| bsd-3-clause | 83b1c0f14524c3ade7dfa5fbb249b680 | 28 | 79 | 0.686782 | 3.324841 | false | false | false | false |
kjung/scikit-learn | benchmarks/bench_plot_svd.py | 322 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
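    # --- Hedged accuracy check (added for illustration) ---
    # randomized_svd trades a little accuracy for speed; the gap can be
    # inspected by comparing singular values on a fresh benchmark matrix:
    X_check = make_low_rank_matrix(500, 500, effective_rank=50, tail_strength=0.2)
    _, s_exact, _ = svd(X_check, full_matrices=False)
    _, s_approx, _ = randomized_svd(X_check, 50, n_iter=3)
    print("max singular value gap: %e" % np.abs(s_exact[:50] - s_approx).max())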
| bsd-3-clause | 8ae9391c7a3ba8b106722fb2a2359139 | 34.353659 | 75 | 0.575026 | 3.750323 | false | false | false | false |
kjung/scikit-learn | sklearn/datasets/olivetti_faces.py | 62 | 4699 | """Modified Olivetti faces dataset.
The original database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
There are ten different images of each of 40 distinct subjects. For some
subjects, the images were taken at different times, varying the lighting,
facial expressions (open / closed eyes, smiling / not smiling) and facial
details (glasses / no glasses). All the images were taken against a dark
homogeneous background with the subjects in an upright, frontal position (with
tolerance for some side movement).
The original dataset consisted of 92 x 112 images, while the Roweis version
consists of 64x64 images.
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from io import BytesIO
from os.path import exists
from os import makedirs
try:
# Python 2
import urllib2
urlopen = urllib2.urlopen
except ImportError:
# Python 3
import urllib.request
urlopen = urllib.request.urlopen
import numpy as np
from scipy.io.matlab import loadmat
from .base import get_data_home, Bunch
from .base import _pkl_filepath
from ..utils import check_random_state
from ..externals import joblib
DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat"
TARGET_FILENAME = "olivetti.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
download_if_missing=True):
"""Loader for the Olivetti faces data-set from AT&T.
Read more in the :ref:`User Guide <olivetti_faces>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : boolean, optional
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : optional, integer or RandomState object
The seed or the random number generator used to shuffle the
data.
Returns
-------
An object with the following attributes:
data : numpy array of shape (400, 4096)
Each row corresponds to a ravelled face image of original size 64 x 64 pixels.
images : numpy array of shape (400, 64, 64)
Each row is a face image corresponding to one of the 40 subjects of the dataset.
target : numpy array of shape (400, )
Labels associated to each face image. Those labels are ranging from
0-39 and correspond to the Subject IDs.
DESCR : string
Description of the modified Olivetti Faces Dataset.
Notes
------
This dataset consists of 10 pictures each of 40 individuals. The original
database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, TARGET_FILENAME)
if not exists(filepath):
print('downloading Olivetti faces from %s to %s'
% (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
mfile = loadmat(buf)
faces = mfile['faces'].T.copy()
joblib.dump(faces, filepath, compress=6)
del mfile
else:
faces = joblib.load(filepath)
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
return Bunch(data=faces.reshape(len(faces), -1),
images=faces,
target=target,
DESCR=MODULE_DOCS)
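# --- Hedged usage sketch (added; requires network access or a cached copy) ---
if __name__ == "__main__":
    faces = fetch_olivetti_faces(shuffle=True, random_state=0)
    print(faces.data.shape, faces.images.shape, faces.target.shape)
    # expected: (400, 4096) (400, 64, 64) (400,)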
| bsd-3-clause | feadc23ec0c37d152a677af2a047b210 | 32.564286 | 88 | 0.681634 | 3.873866 | false | false | false | false |
kjung/scikit-learn | examples/decomposition/plot_pca_iris.py | 28 | 1484 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause | c5828e19d18ee747b4c68528e92a6e13 | 24.135593 | 73 | 0.587997 | 3.002024 | false | false | false | false |
kjung/scikit-learn | sklearn/externals/odict.py | 54 | 9148 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger
# http://code.activestate.com/recipes/576693/
"Ordered dictionary"
try:
from thread import get_ident as _get_ident
except ImportError:
try:
from dummy_thread import get_ident as _get_ident
except ImportError:
# Ensure that this module is still importable under Python3 to avoid
# crashing code-inspecting tools like nose.
from _dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
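# --- Hedged usage sketch (added; not part of the original backport) ---
if __name__ == "__main__":
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    d['c'] = 3
    print(d.keys())               # insertion order is preserved: ['b', 'a', 'c']
    print(d.popitem(last=False))  # FIFO pop: ('b', 1)
    print(d.popitem())            # LIFO pop: ('c', 3)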
| bsd-3-clause | abae930781db79e25e7fec4f9cc19040 | 33.390977 | 87 | 0.551268 | 3.993016 | false | false | false | false |
kjung/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 16 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
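# --- Hedged mini-example (added): the power-iteration idea on a tiny graph ---
# The same fixed point computed above, on a made-up 4-node adjacency matrix
# where node 0 is the hub; its component of the principal eigenvector is largest.
A_small = np.array([[0., 1., 1., 1.],
                    [1., 0., 1., 0.],
                    [1., 1., 0., 0.],
                    [1., 0., 0., 0.]])
v = np.ones(4) / 4
for _ in range(100):
    v = A_small.dot(v)
    v /= np.linalg.norm(v)
print("toy centrality scores:", v)  # node 0 (the hub) ranks first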
| bsd-3-clause | e634e05f27c61d6d5e1bf7e53120fd92 | 32.995652 | 79 | 0.650467 | 3.686469 | false | false | false | false |
kjung/scikit-learn | sklearn/datasets/svmlight_format.py | 28 | 16073 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
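# --- Hedged format illustration (added; not part of the original module) ---
# Each line of an svmlight / libsvm file is "<label> <index>:<value> ...", e.g.
#
#   1 1:0.43 3:0.12 9284:0.2
#   0 2:0.12 8:0.4
#
# and a round trip through this module looks like (the file name is hypothetical):
#
#   >>> from sklearn.datasets import dump_svmlight_file, load_svmlight_file
#   >>> dump_svmlight_file(X, y, "data.svmlight", zero_based=False)
#   >>> X2, y2 = load_svmlight_file("data.svmlight")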
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
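# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It round-trips a tiny dense dataset through the svmlight format;
# the output path is a hypothetical placeholder.
def _example_dump_and_reload(path="example_data.svmlight"):
    import numpy as np
    X = np.array([[1.0, 0.0, 2.0],
                  [0.0, 3.0, 0.0]])
    y = np.array([0, 1])
    # Zero-valued features are simply omitted from the written file.
    dump_svmlight_file(X, y, path, zero_based=True)
    X2, y2 = load_svmlight_file(path, n_features=X.shape[1], zero_based=True)
    assert np.allclose(X, X2.toarray())
    return X2, y2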
| bsd-3-clause | c9f504dd7685c3042513b05e6b25fb46 | 37.269048 | 79 | 0.635911 | 3.860918 | false | false | false | false |
kjung/scikit-learn | sklearn/linear_model/passive_aggressive.py | 58 | 10566 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
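# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Data values are made up for the example.
def _example_passive_aggressive_classifier():
    import numpy as np
    X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
    y = np.array([0, 0, 1, 1])
    clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", random_state=0)
    clf.fit(X, y)
    # partial_fit requires the full set of classes on the first call only.
    clf.partial_fit(np.array([[0.5, 0.5]]), np.array([1]),
                    classes=np.array([0, 1]))
    return clf.predict(X)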
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
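# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Data values are made up; the regressor only updates its weights
# when the absolute prediction error exceeds ``epsilon``.
def _example_passive_aggressive_regressor():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    true_coef = np.array([1.0, -2.0, 0.5])
    y = np.dot(X, true_coef) + 0.01 * rng.randn(50)
    reg = PassiveAggressiveRegressor(C=1.0, epsilon=0.1, random_state=0)
    reg.fit(X, y)
    return reg.coef_, reg.intercept_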
| bsd-3-clause | e4b50ecfb96c4d164ea1720e5c367484 | 34.337793 | 79 | 0.583475 | 4.281199 | false | false | false | false |
kjung/scikit-learn | sklearn/gaussian_process/correlation_models.py | 51 | 7654 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
"""
Absolute exponential autocorrelation model.
(Ornstein-Uhlenbeck stochastic process)::
        theta, d --> r(theta, d) = exp( - sum_{i=1..n} theta_i * |d_i| )
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.abs(np.asarray(d, dtype=np.float64))
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
        theta, d --> r(theta, d) = exp( - sum_{i=1..n} theta_i * (d_i)^2 )
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
def generalized_exponential(theta, d):
"""
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
        theta, d --> r(theta, d) = exp( - sum_{i=1..n} theta_i * |d_i|^p )
Parameters
----------
theta : array_like
An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
autocorrelation parameter(s) (theta, p).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if n_features > 1 and lth == 2:
theta = np.hstack([np.repeat(theta[0], n_features), theta[1]])
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
r = np.exp(- np.sum(td, 1))
return r
def pure_nugget(theta, d):
"""
Spatial independence correlation model (pure nugget).
(Useful when one wants to solve an ordinary least squares problem!)::
        theta, d --> r(theta, d) = 1 if sum_{i=1..n} |d_i| == 0,
                                   0 otherwise
Parameters
----------
theta : array_like
None.
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
def cubic(theta, d):
"""
Cubic correlation model::
        theta, d --> r(theta, d) =
            prod_{j=1..n} max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3), i = 1,...,m
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
r = np.prod(ss, 1)
return r
def linear(theta, d):
"""
Linear correlation model::
        theta, d --> r(theta, d) =
            prod_{j=1..n} max(0, 1 - theta_j*d_ij), i = 1,...,m
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float64)
d = np.asarray(d, dtype=np.float64)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
r = np.prod(ss, 1)
return r
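# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Distances and parameters are made up; each model maps componentwise
# distances to one correlation value per row, equal to 1 at zero distance.
def _example_correlation_models():
    theta = np.array([0.5, 2.0])       # anisotropic: one parameter per feature
    d = np.array([[0.0, 0.0],
                  [1.0, 0.5],
                  [3.0, 2.0]])
    r_abs = absolute_exponential(theta, d)
    r_sq = squared_exponential(theta, d)
    return r_abs, r_sq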
| bsd-3-clause | d875d22e2538556db06cea2cc3e1976e | 25.950704 | 78 | 0.549125 | 3.606975 | false | false | false | false |
kjung/scikit-learn | sklearn/externals/joblib/parallel.py | 31 | 35665 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
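# --- Illustrative usage sketch (added for clarity; not part of the original
# module): ``delayed`` only captures the call, it does not execute it.
def _example_delayed():
    from math import sqrt
    func, args, kwargs = delayed(sqrt)(4)
    # The captured triple can be evaluated later, e.g. inside a worker.
    return func(*args, **kwargs)   # -> 2.0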
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
        Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes : int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
        Use None to disable memmapping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of our pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
                # The current batch size is too big. If we schedule overly long
                # running batches, some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print only 'verbose' times a messages
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator.
while self.dispatch_one_batch(iterator):
self._iterating = True
else:
self._iterating = False
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above for loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
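# --- Illustrative usage sketch (added for clarity; not part of the original
# module): the context manager API mentioned above lets several calls share
# one pool, which is created on __enter__ and terminated on __exit__.
def _example_parallel_reuse():
    from math import sqrt
    with Parallel(n_jobs=2) as parallel:
        first = parallel(delayed(sqrt)(i) for i in range(4))
        second = parallel(delayed(sqrt)(i) for i in range(4, 8))
    return first, second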
| bsd-3-clause | e50afb979c7cddc314c16f13cb2acc4f | 42.230303 | 104 | 0.562428 | 4.669416 | false | false | false | false |
kjung/scikit-learn | sklearn/ensemble/voting_classifier.py | 5 | 8545 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.validation import check_is_fitted
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
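# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Because get_params exposes the named sub-estimators, their
# hyper-parameters can be tuned with the usual ``<name>__<param>`` syntax.
# The GridSearchCV import path is assumed; older releases expose it from
# sklearn.grid_search instead of sklearn.model_selection.
def _example_voting_grid_search():
    from sklearn.linear_model import LogisticRegression
    from sklearn.naive_bayes import GaussianNB
    from sklearn.model_selection import GridSearchCV
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    eclf = VotingClassifier(estimators=[('lr', LogisticRegression()),
                                        ('gnb', GaussianNB())],
                            voting='soft')
    grid = GridSearchCV(eclf, param_grid={'lr__C': [0.1, 1.0, 10.0]}, cv=2)
    grid.fit(X, y)
    return grid.best_params_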
| bsd-3-clause | 61e583e2c20e6d1a208baf5c63cf40fe | 35.054852 | 79 | 0.569573 | 4.106199 | false | false | false | false |
kjung/scikit-learn | examples/manifold/plot_compare_methods.py | 37 | 4036 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause | adce37532962e501276a1ac08aa065c8 | 31.813008 | 76 | 0.657582 | 2.969831 | false | false | false | false |
kjung/scikit-learn | benchmarks/bench_random_projections.py | 390 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause | c707476c1ad28622ab69547ef8275e83 | 34.03937 | 80 | 0.488876 | 4.392892 | false | false | false | false |
kjung/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 377 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause | e5f364437ce01ac7b502fa891558405e | 31.924051 | 78 | 0.636294 | 3.010417 | false | false | false | false |
kjung/scikit-learn | examples/linear_model/plot_theilsen.py | 98 | 3846 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen, it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. With that restriction, Theil-Sen remains
applicable to larger problems, at the cost of losing some of its mathematical
properties since it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
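# Illustrative sketch (not the estimator used below): for a single feature,
# the subpopulation-of-slopes idea described in the module docstring reduces
# to the median of all pairwise slopes plus a median-based intercept.
# TheilSenRegressor generalizes this via the spatial median over (sub)sampled
# combinations of points. The helper name below is only for illustration.
def _naive_theil_sen_1d(x_1d, y_1d):
    """Toy median-of-pairwise-slopes version of Theil-Sen (one feature)."""
    # all index pairs i < j; assumes no duplicated x values (zero division)
    ii, jj = np.triu_indices(len(x_1d), k=1)
    slopes = (y_1d[jj] - y_1d[ii]) / (x_1d[jj] - x_1d[ii])
    slope = np.median(slopes)
    intercept = np.median(y_1d - slope * x_1d)
    return slope, intercept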
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| bsd-3-clause | a0214f05a7886b6a11c43927deba1667 | 33.648649 | 79 | 0.673427 | 3.332756 | false | false | false | false |
kjung/scikit-learn | sklearn/tree/tests/test_export.py | 30 | 9588 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=2,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
'[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n' \
'[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
'[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 3 ;\n' \
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 4 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=2,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e5813980"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause | 32b51bbada6fda898ad5cfbe93115337 | 39.455696 | 78 | 0.472361 | 3.173784 | false | true | false | false |
kjung/scikit-learn | examples/cluster/plot_face_segmentation.py | 70 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans', spectral clustering clusters samples in the embedding space
using a k-means algorithm;
* with 'discretize', it iteratively searches for the partition that is
closest to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| bsd-3-clause | 30a0040917d0282c410455c26247d8bc | 31.632184 | 79 | 0.667136 | 3.795455 | false | false | false | false |
kjung/scikit-learn | sklearn/feature_extraction/text.py | 7 | 50272 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
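# Hedged illustration of the two helpers above (not part of the public API):
# both reduce accented characters such as u"\u00e9" to plain "e", while
# strip_accents_ascii additionally drops characters without an ASCII
# counterpart, which is why it only suits languages with a direct
# transliteration to ASCII.
def _strip_accents_example():
    accented = u"na\u00efve caf\u00e9"
    # both calls are expected to return u"naive cafe" here
    return strip_accents_unicode(accented), strip_accents_ascii(accented)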
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
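# Hedged sketch of the 'char_wb' analyzer described in VectorizerMixin above
# (illustrative only, not part of the public API): each word is padded with
# spaces before n-grams are extracted, so n-grams never straddle word
# boundaries.
def _char_wb_analyzer_example():
    vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(3, 3))
    analyze = vectorizer.build_analyzer()
    # yields n-grams like ' ju', 'jum', 'ump', ..., 'fox', 'ox ' but never 'y f'
    return analyze("jumpy fox")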
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items (strings or
bytes) that are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
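# Hedged usage sketch of the hashing trick described in the class docstring
# (illustrative only, not part of the public API): no vocabulary is stored,
# and the output width is fixed by n_features regardless of the corpus, at
# the price of possible (but rare) hash collisions.
def _hashing_vectorizer_example():
    corpus = ["the quick brown fox", "the lazy dog"]
    hasher = HashingVectorizer(n_features=2 ** 8, norm=None,
                               non_negative=True)
    X = hasher.transform(corpus)
    # (2, 256): independent of how many distinct tokens the corpus contains
    return X.shape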
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items (strings or
bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if integer,
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if integer,
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to fewer documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
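# Hedged usage sketch of CountVectorizer (illustrative only, not part of the
# public API): fit learns the vocabulary, transform builds the sparse
# document-term count matrix, and max_df / min_df prune overly common or
# overly rare terms as explained in the class docstring.
def _count_vectorizer_example():
    corpus = ["the cat sat", "the cat sat on the mat", "dogs and cats"]
    vectorizer = CountVectorizer(min_df=1)
    X = vectorizer.fit_transform(corpus)
    # X.shape == (3, len(vectorizer.vocabulary_))
    return X.toarray(), vectorizer.get_feature_names()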
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
            # in-place multiplication (*=) is not supported for sparse matrices
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items (strings or
        bytes) that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra-corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents and count the occurrences of tokens and return
        them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
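    Examples
    --------
    A small, illustrative usage sketch (the documents below are arbitrary):
    >>> from sklearn.feature_extraction.text import TfidfVectorizer
    >>> corpus = ["the cat sat on the mat", "the dog sat on the log"]
    >>> vectorizer = TfidfVectorizer()
    >>> X = vectorizer.fit_transform(corpus)
    >>> feature_names = vectorizer.get_feature_names()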
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause | a89a2613da16fb9a308a6ab89969350b | 36.627994 | 79 | 0.615623 | 4.438157 | false | false | false | false |
kjung/scikit-learn | examples/classification/plot_lda.py | 136 | 2419 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='navy')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='gold')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause | 9be88cc7d0eb8edb282e719ae39e1155 | 33.070423 | 84 | 0.661844 | 3.465616 | false | false | false | false |
kjung/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 282 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause | 4fa0490a3dd57f024dfd8c93f01ed853 | 28.414414 | 73 | 0.693109 | 3.401042 | false | false | false | false |
kjung/scikit-learn | examples/decomposition/plot_image_denoising.py | 69 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first fitting an online :ref:`DictionaryLearning`
dictionary and then applying various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients are a bit less biased than when keeping only one
(the edges look less prominent). They are in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause | 7069a6e1f1823997c66a36b310b8ce65 | 35.121387 | 79 | 0.647304 | 3.654386 | false | false | false | false |
kjung/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 292 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
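    """Benchmark KMeans and MiniBatchKMeans over a grid of problem sizes.
    For every (n_samples, n_features) pair, random integer data is generated,
    both estimators are fitted, and the fit time and final inertia of each are
    appended to the returned results dict.
    """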
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
            data = nr.randint(-50, 51, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(50, 150, 5).astype(int)
    features_range = np.linspace(150, 50000, 5).astype(int)
    chunks = np.linspace(500, 10000, 15).astype(int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
    max_time = max([max(i) for i in [t for (label, t) in results.items()
                                     if "speed" in label.lower()]])
    max_inertia = max([max(i) for i in [
        t for (label, t) in results.items()
        if "speed" not in label.lower()]])
fig = plt.figure('scikit-learn K-Means benchmark results')
    for c, (label, timings) in zip('brcy',
                                   sorted(results.items())):
        if 'speed' in label.lower():
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
    for c, (label, timings) in zip('br',
                                   sorted(results_2.items())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause | b5bf21059b8d85754418fa9671887377 | 32.884058 | 76 | 0.509624 | 3.594158 | false | false | false | false |
kjung/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 67 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause | 6300875b0e47a10cb59db94ac6ffe218 | 34.724771 | 81 | 0.704931 | 4.064718 | false | true | false | false |
kjung/scikit-learn | examples/mixture/plot_gmm_pdf.py | 282 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause | 4bacfeed60ae10da691085db51c57def | 29.56 | 69 | 0.667539 | 3.131148 | false | false | false | false |
pytorch/text | examples/tutorials/t5_demo.py | 1 | 24935 | """
T5-Base Model for Summarization, Sentiment Classification, and Translation
==========================================================================
**Author**: `Pendo Abbo <pabbo@fb.com>`__
"""
######################################################################
# Overview
# --------
#
# This tutorial demonstrates how to use a pre-trained T5 Model for summarization, sentiment classification, and
# translation tasks. We will demonstrate how to use the torchtext library to:
#
# 1. Build a text pre-processing pipeline for a T5 model
# 2. Instantiate a pre-trained T5 model with base configuration
# 3. Read in the CNNDM, IMDB, and Multi30k datasets and pre-process their texts in preparation for the model
# 4. Perform text summarization, sentiment classification, and translation
#
#
######################################################################
# Common imports
# --------------
import torch
import torch.nn.functional as F
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
#######################################################################
# Data Transformation
# -------------------
#
# The T5 model does not work with raw text. Instead, it requires the text to be transformed into numerical form
# in order to perform training and inference. The following transformations are required for the T5 model:
#
# 1. Tokenize text
# 2. Convert tokens into (integer) IDs
# 3. Truncate the sequences to a specified maximum length
# 4. Add end-of-sequence (EOS) and padding token IDs
#
# T5 uses a SentencePiece model for text tokenization. Below, we use a pre-trained SentencePiece model to build
# the text pre-processing pipeline using torchtext's T5Transform. Note that the transform supports both
# batched and non-batched text input (i.e. one can either pass a single sentence or a list of sentences), however
# the T5 model expects the input to be batched.
#
from torchtext.prototype.models import T5Transform
padding_idx = 0
eos_idx = 1
max_seq_len = 512
t5_sp_model_path = "https://download.pytorch.org/models/text/t5_tokenizer_base.model"
transform = T5Transform(
sp_model_path=t5_sp_model_path,
max_seq_len=max_seq_len,
eos_idx=eos_idx,
padding_idx=padding_idx,
)
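#######################################################################
# As a quick, illustrative check (the sentences below are arbitrary), the
# transform accepts a single string or a list of strings and returns the
# corresponding token IDs, truncated to ``max_seq_len`` and terminated with
# the EOS index
#
# ::
#
#    sample = ["A toy example sentence.", "Another short example."]
#    token_ids = transform(sample)
#    # token_ids holds the (padded) integer token IDs for the two sentences
#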
#######################################################################
# Alternatively, we can also use the transform shipped with the pre-trained models that does all of the above out-of-the-box
#
# ::
#
# from torchtext.prototype.models import T5_BASE_GENERATION
# transform = T5_BASE_GENERATION.transform()
#
######################################################################
# Model Preparation
# -----------------
#
# torchtext provides SOTA pre-trained models that can be used directly for NLP tasks or fine-tuned on downstream tasks. Below
# we use the pre-trained T5 model with standard base configuration to perform text summarization, sentiment classification, and
# translation. For additional details on available pre-trained models, please refer to documentation at
# https://pytorch.org/text/main/models.html
#
#
from torchtext.prototype.models import T5_BASE_GENERATION
t5_base = T5_BASE_GENERATION
transform = t5_base.transform()
model = t5_base.get_model()
model.eval()
model.to(DEVICE)
#######################################################################
# Sequence Generator
# ------------------
#
# We can define a sequence generator to produce an output sequence based on the input sequence provided. This calls on the
# model's encoder and decoder, and iteratively expands the decoded sequences until the end-of-sequence token is generated
# for all sequences in the batch. The `generate` method shown below uses a beam search to generate the sequences. Larger
# beam sizes can result in better generation at the cost of computational complexity, and a beam size of 1 is equivalent to
# a greedy decoder.
#
from torch import Tensor
from torchtext.prototype.models import T5Model
def beam_search(
beam_size: int,
step: int,
bsz: int,
decoder_output: Tensor,
decoder_tokens: Tensor,
scores: Tensor,
incomplete_sentences: Tensor,
):
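    """Perform a single beam-search expansion step.
    The log-probabilities of the newest decoder position are added to the
    running beam ``scores``, the ``beam_size`` best continuations are kept for
    every sequence in the batch, and the ``incomplete_sentences`` mask is
    re-ordered to follow the surviving beams. Returns the expanded decoder
    token sequences, their updated scores, and the updated mask.
    """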
probs = F.log_softmax(decoder_output[:, -1], dim=-1)
top = torch.topk(probs, beam_size)
# N is number of sequences in decoder_tokens, L is length of sequences, B is beam_size
# decoder_tokens has shape (N,L) -> (N,B,L)
# top.indices has shape (N,B) - > (N,B,1)
# x has shape (N,B,L+1)
# note that when step == 1, N = batch_size, and when step > 1, N = batch_size * beam_size
x = torch.cat([decoder_tokens.unsqueeze(1).repeat(1, beam_size, 1), top.indices.unsqueeze(-1)], dim=-1)
# beams are first created for a given sequence
if step == 1:
# x has shape (batch_size, B, L+1) -> (batch_size * B, L+1)
# new_scores has shape (batch_size,B)
# incomplete_sentences has shape (batch_size * B) = (N)
new_decoder_tokens = x.view(-1, step + 1)
new_scores = top.values
new_incomplete_sentences = incomplete_sentences
# beams already exist, want to expand each beam into possible new tokens to add
    # and for all expanded beams belonging to the same sequences, choose the top k
else:
# scores has shape (batch_size,B) -> (N,1) -> (N,B)
# top.values has shape (N,B)
# new_scores has shape (N,B) -> (batch_size, B^2)
new_scores = (scores.view(-1, 1).repeat(1, beam_size) + top.values).view(bsz, -1)
# v, i have shapes (batch_size, B)
v, i = torch.topk(new_scores, beam_size)
# x has shape (N,B,L+1) -> (batch_size, B, L+1)
# i has shape (batch_size, B) -> (batch_size, B, L+1)
# new_decoder_tokens has shape (batch_size, B, L+1) -> (N, L)
x = x.view(bsz, -1, step + 1)
new_decoder_tokens = x.gather(index=i.unsqueeze(-1).repeat(1, 1, step + 1), dim=1).view(-1, step + 1)
# need to update incomplete sentences in case one of the beams was kicked out
# y has shape (N) -> (N, 1) -> (N, B) -> (batch_size, B^2)
y = incomplete_sentences.unsqueeze(-1).repeat(1, beam_size).view(bsz, -1)
# now can use i to extract those beams that were selected
# new_incomplete_sentences has shape (batch_size, B^2) -> (batch_size, B) -> (N, 1) -> N
new_incomplete_sentences = y.gather(index=i, dim=1).view(bsz * beam_size, 1).squeeze(-1)
# new_scores has shape (batch_size, B)
new_scores = v
return new_decoder_tokens, new_scores, new_incomplete_sentences
def generate(encoder_tokens: Tensor, eos_idx: int, model: T5Model, beam_size: int) -> Tensor:
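    """Generate an output sequence for every encoder input using beam search.
    The input tokens are encoded once; the decoder is then expanded token by
    token with ``beam_search`` until every sequence in the batch has emitted
    ``eos_idx`` (or ``model.config.max_seq_len`` is reached), and the
    highest-scoring beam of each input is returned.
    """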
# pass tokens through encoder
bsz = encoder_tokens.size(0)
encoder_padding_mask = encoder_tokens.eq(model.padding_idx)
encoder_embeddings = model.dropout1(model.token_embeddings(encoder_tokens))
encoder_output = model.encoder(encoder_embeddings, tgt_key_padding_mask=encoder_padding_mask)[0]
encoder_output = model.norm1(encoder_output)
encoder_output = model.dropout2(encoder_output)
# initialize decoder input sequence; T5 uses padding index as starter index to decoder sequence
decoder_tokens = torch.ones((bsz, 1), dtype=torch.long) * model.padding_idx
scores = torch.zeros((bsz, beam_size))
# mask to keep track of sequences for which the decoder has not produced an end-of-sequence token yet
incomplete_sentences = torch.ones(bsz * beam_size, dtype=torch.long)
# iteratively generate output sequence until all sequences in the batch have generated the end-of-sequence token
for step in range(model.config.max_seq_len):
if step == 1:
# duplicate and order encoder output so that each beam is treated as its own independent sequence
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(encoder_tokens.device).long()
encoder_output = encoder_output.index_select(0, new_order)
encoder_padding_mask = encoder_padding_mask.index_select(0, new_order)
# causal mask and padding mask for decoder sequence
tgt_len = decoder_tokens.shape[1]
decoder_mask = torch.triu(torch.ones((tgt_len, tgt_len), dtype=torch.float64), diagonal=1).bool()
decoder_padding_mask = decoder_tokens.eq(model.padding_idx)
        # The T5 implementation uses the padding idx to start the sequence. Want to ignore this when masking
decoder_padding_mask[:, 0] = False
# pass decoder sequence through decoder
decoder_embeddings = model.dropout3(model.token_embeddings(decoder_tokens))
decoder_output = model.decoder(
decoder_embeddings,
memory=encoder_output,
tgt_mask=decoder_mask,
tgt_key_padding_mask=decoder_padding_mask,
memory_key_padding_mask=encoder_padding_mask,
)[0]
decoder_output = model.norm2(decoder_output)
decoder_output = model.dropout4(decoder_output)
decoder_output = decoder_output * (model.config.embedding_dim ** -0.5)
decoder_output = model.lm_head(decoder_output)
decoder_tokens, scores, incomplete_sentences = beam_search(
beam_size, step + 1, bsz, decoder_output, decoder_tokens, scores, incomplete_sentences
)
# ignore newest tokens for sentences that are already complete
decoder_tokens[:, -1] *= incomplete_sentences
# update incomplete_sentences to remove those that were just ended
incomplete_sentences = incomplete_sentences - (decoder_tokens[:, -1] == eos_idx).long()
# early stop if all sentences have been ended
if (incomplete_sentences == 0).all():
break
# take most likely sequence
decoder_tokens = decoder_tokens.view(bsz, beam_size, -1)[:, 0, :]
return decoder_tokens
#######################################################################
# Datasets
# --------
# torchtext provides several standard NLP datasets. For a complete list, refer to the documentation
# at https://pytorch.org/text/stable/datasets.html. These datasets are built using composable torchdata
# datapipes and hence support standard flow-control and mapping/transformation using user defined
# functions and transforms.
#
# Below, we demonstrate how to pre-process the CNNDM dataset to include the prefix necessary for the
# model to identify the task it is performing. The CNNDM dataset has a train, validation, and test
# split. Below we demo on the test split.
#
# The T5 model uses the prefix "summarize" for text summarization. For more information on task
# prefixes, please visit Appendix D of the T5 Paper at https://arxiv.org/pdf/1910.10683.pdf
#
# .. note::
# Using datapipes is still currently subject to a few caveats. If you wish
# to extend this example to include shuffling, multi-processing, or
# distributed learning, please see :ref:`this note <datapipes_warnings>`
# for further instructions.
from functools import partial
from torch.utils.data import DataLoader
from torchtext.datasets import CNNDM
cnndm_batch_size = 5
cnndm_datapipe = CNNDM(split="test")
task = "summarize"
def apply_prefix(task, x):
return f"{task}: " + x[0], x[1]
cnndm_datapipe = cnndm_datapipe.map(partial(apply_prefix, task))
cnndm_datapipe = cnndm_datapipe.batch(cnndm_batch_size)
cnndm_datapipe = cnndm_datapipe.rows2columnar(["article", "abstract"])
cnndm_dataloader = DataLoader(cnndm_datapipe, batch_size=None)
#######################################################################
# Alternately we can also use batched API (i.e apply the prefix on the whole batch)
#
# ::
#
# def batch_prefix(task, x):
# return {
# "article": [f'{task}: ' + y for y in x["article"]],
# "abstract": x["abstract"]
# }
#
# cnndm_batch_size = 5
# cnndm_datapipe = CNNDM(split="test")
# task = 'summarize'
#
# cnndm_datapipe = cnndm_datapipe.batch(cnndm_batch_size).rows2columnar(["article", "abstract"])
# cnndm_datapipe = cnndm_datapipe.map(partial(batch_prefix, task))
# cnndm_dataloader = DataLoader(cnndm_datapipe, batch_size=None)
#
#######################################################################
# We can also load the IMDB dataset, which will be used to demonstrate sentiment classification using the T5 model.
# This dataset has a train and test split. Below we demo on the test split.
#
# The T5 model was trained on the SST2 dataset (also available in torchtext) for sentiment classification using the
# prefix "sst2 sentence". Therefore, we will use this prefix to perform sentiment classification on the IMDB dataset.
#
from torchtext.datasets import IMDB
imdb_batch_size = 3
imdb_datapipe = IMDB(split="test")
task = "sst2 sentence"
labels = {"neg": "negative", "pos": "positive"}
def process_labels(labels, x):
return x[1], labels[x[0]]
imdb_datapipe = imdb_datapipe.map(partial(process_labels, labels))
imdb_datapipe = imdb_datapipe.map(partial(apply_prefix, task))
imdb_datapipe = imdb_datapipe.batch(imdb_batch_size)
imdb_datapipe = imdb_datapipe.rows2columnar(["text", "label"])
imdb_dataloader = DataLoader(imdb_datapipe, batch_size=None)
#######################################################################
# Finally, we can also load the Multi30k dataset to demonstrate English to German translation using the T5 model.
# This dataset has a train, validation, and test split. Below we demo on the test split.
#
# The T5 model uses the prefix "translate English to German" for this task.
from torchtext.datasets import Multi30k
multi_batch_size = 5
language_pair = ("en", "de")
multi_datapipe = Multi30k(split="test", language_pair=language_pair)
task = "translate English to German"
multi_datapipe = multi_datapipe.map(partial(apply_prefix, task))
multi_datapipe = multi_datapipe.batch(multi_batch_size)
multi_datapipe = multi_datapipe.rows2columnar(["english", "german"])
multi_dataloader = DataLoader(multi_datapipe, batch_size=None)
#######################################################################
# Generate Summaries
# ------------------
#
# We can put all of the components together to generate summaries on the first batch of articles in the CNNDM test set
# using a beam size of 3.
#
batch = next(iter(cnndm_dataloader))
input_text = batch["article"]
target = batch["abstract"]
beam_size = 3
model_input = transform(input_text)
model_output = generate(model=model, encoder_tokens=model_input, eos_idx=eos_idx, beam_size=beam_size)
output_text = transform.decode(model_output.tolist())
for i in range(cnndm_batch_size):
print(f"Example {i+1}:\n")
print(f"prediction: {output_text[i]}\n")
print(f"target: {target[i]}\n\n")
#######################################################################
# Summarization Output
# --------------------
#
# ::
#
# Example 1:
#
# prediction: the Palestinians become the 123rd member of the international criminal
# court . the accession was marked by a ceremony at the Hague, where the court is based .
# the ICC opened a preliminary examination into the situation in the occupied
# Palestinian territory .
#
# target: Membership gives the ICC jurisdiction over alleged crimes committed in
# Palestinian territories since last June . Israel and the United States opposed the
# move, which could open the door to war crimes investigations against Israelis .
#
#
# Example 2:
#
# prediction: a stray pooch has used up at least three of her own after being hit by a
# car and buried in a field . the dog managed to stagger to a nearby farm, dirt-covered
# and emaciated, where she was found . she suffered a dislocated jaw, leg injuries and a
# caved-in sinus cavity -- and still requires surgery to help her breathe .
#
# target: Theia, a bully breed mix, was apparently hit by a car, whacked with a hammer
# and buried in a field . "She's a true miracle dog and she deserves a good life," says
# Sara Mellado, who is looking for a home for Theia .
#
#
# Example 3:
#
# prediction: mohammad Javad Zarif arrived in Iran on a sunny friday morning . he has gone
# a long way to bring Iran in from the cold and allow it to rejoin the international
# community . but there are some facts about him that are less well-known .
#
# target: Mohammad Javad Zarif has spent more time with John Kerry than any other
# foreign minister . He once participated in a takeover of the Iranian Consulate in San
# Francisco . The Iranian foreign minister tweets in English .
#
#
# Example 4:
#
# prediction: five americans were monitored for three weeks after being exposed to Ebola in
# west africa . one of the five had a heart-related issue and has been discharged but hasn't
# left the area . they are clinicians for Partners in Health, a Boston-based aid group .
#
# target: 17 Americans were exposed to the Ebola virus while in Sierra Leone in March .
# Another person was diagnosed with the disease and taken to hospital in Maryland .
# National Institutes of Health says the patient is in fair condition after weeks of
# treatment .
#
#
# Example 5:
#
# prediction: the student was identified during an investigation by campus police and
# the office of student affairs . he admitted to placing the noose on the tree early
# Wednesday morning . the incident is one of several recent racist events to affect
# college students .
#
# target: Student is no longer on Duke University campus and will face disciplinary
# review . School officials identified student during investigation and the person
# admitted to hanging the noose, Duke says . The noose, made of rope, was discovered on
# campus about 2 a.m.
#
#######################################################################
# Generate Sentiment Classifications
# ----------------------------------
#
# Similarly, we can use the model to generate sentiment classifications on the first batch of reviews from the IMDB test set
# using a beam size of 1.
#
batch = next(iter(imdb_dataloader))
input_text = batch["text"]
target = batch["label"]
beam_size = 1
model_input = transform(input_text)
model_output = generate(model=model, encoder_tokens=model_input, eos_idx=eos_idx, beam_size=beam_size)
output_text = transform.decode(model_output.tolist())
for i in range(imdb_batch_size):
print(f"Example {i+1}:\n")
print(f"input_text: {input_text[i]}\n")
print(f"prediction: {output_text[i]}\n")
print(f"target: {target[i]}\n\n")
#######################################################################
# Sentiment Output
# ----------------
#
# ::
#
# Example 1:
#
# input_text: sst2 sentence: I love sci-fi and am willing to put up with a lot. Sci-fi
# movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like
# this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original).
# Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn't match the
# background, and painfully one-dimensional characters cannot be overcome with a 'sci-fi'
# setting. (I'm sure there are those of you out there who think Babylon 5 is good sci-fi TV.
# It's not. It's clichéd and uninspiring.) While US viewers might like emotion and character
# development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may
# treat important issues, yet not as a serious philosophy. It's really difficult to care about
# the characters here as they are not simply foolish, just missing a spark of life. Their
# actions and reactions are wooden and predictable, often painful to watch. The makers of Earth
# KNOW it's rubbish as they have to always say "Gene Roddenberry's Earth..." otherwise people
# would not continue watching. Roddenberry's ashes must be turning in their orbit as this dull,
# cheap, poorly edited (watching it without advert breaks really brings this home) trudging
# Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring
# him back as another actor. Jeeez. Dallas all over again.
#
# prediction: negative
#
# target: negative
#
#
# Example 2:
#
# input_text: sst2 sentence: Worth the entertainment value of a rental, especially if you like
# action movies. This one features the usual car chases, fights with the great Van Damme kick
# style, shooting battles with the 40 shell load shotgun, and even terrorist style bombs. All
# of this is entertaining and competently handled but there is nothing that really blows you
# away if you've seen your share before.<br /><br />The plot is made interesting by the
# inclusion of a rabbit, which is clever but hardly profound. Many of the characters are
# heavily stereotyped -- the angry veterans, the terrified illegal aliens, the crooked cops,
# the indifferent feds, the bitchy tough lady station head, the crooked politician, the fat
# federale who looks like he was typecast as the Mexican in a Hollywood movie from the 1940s.
# All passably acted but again nothing special.<br /><br />I thought the main villains were
# pretty well done and fairly well acted. By the end of the movie you certainly knew who the
# good guys were and weren't. There was an emotional lift as the really bad ones got their just
# deserts. Very simplistic, but then you weren't expecting Hamlet, right? The only thing I found
# really annoying was the constant cuts to VDs daughter during the last fight scene.<br /><br />
# Not bad. Not good. Passable 4.
#
# prediction: negative
#
# target: negative
#
#
# Example 3:
#
# input_text: sst2 sentence: its a totally average film with a few semi-alright action sequences
# that make the plot seem a little better and remind the viewer of the classic van dam films.
# parts of the plot don't make sense and seem to be added in to use up time. the end plot is that
# of a very basic type that doesn't leave the viewer guessing and any twists are obvious from the
# beginning. the end scene with the flask backs don't make sense as they are added in and seem to
# have little relevance to the history of van dam's character. not really worth watching again,
# bit disappointed in the end production, even though it is apparent it was shot on a low budget
# certain shots and sections in the film are of poor directed quality.
#
# prediction: negative
#
# target: negative
#
#######################################################################
# Generate Translations
# ---------------------
#
# Finally, we can also use the model to generate English to German translations on the first batch of examples from the Multi30k
# test set using a beam size of 4.
#
batch = next(iter(multi_dataloader))
input_text = batch["english"]
target = batch["german"]
beam_size = 4
model_input = transform(input_text)
model_output = generate(model=model, encoder_tokens=model_input, eos_idx=eos_idx, beam_size=beam_size)
output_text = transform.decode(model_output.tolist())
for i in range(multi_batch_size):
print(f"Example {i+1}:\n")
print(f"input_text: {input_text[i]}\n")
print(f"prediction: {output_text[i]}\n")
print(f"target: {target[i]}\n\n")
#######################################################################
# Translation Output
# ------------------
#
# ::
#
# Example 1:
#
# input_text: translate English to German: A man in an orange hat starring at something.
#
# prediction: Ein Mann in einem orangen Hut, der an etwas schaut.
#
# target: Ein Mann mit einem orangefarbenen Hut, der etwas anstarrt.
#
#
# Example 2:
#
# input_text: translate English to German: A Boston Terrier is running on lush green grass in front of a white fence.
#
# prediction: Ein Boston Terrier läuft auf üppigem grünem Gras vor einem weißen Zaun.
#
# target: Ein Boston Terrier läuft über saftig-grünes Gras vor einem weißen Zaun.
#
#
# Example 3:
#
# input_text: translate English to German: A girl in karate uniform breaking a stick with a front kick.
#
# prediction: Ein Mädchen in Karate-Uniform bricht einen Stöck mit einem Frontkick.
#
# target: Ein Mädchen in einem Karateanzug bricht ein Brett mit einem Tritt.
#
#
# Example 4:
#
# input_text: translate English to German: Five people wearing winter jackets and helmets stand in the snow, with snowmobiles in the background.
#
# prediction: Fünf Menschen mit Winterjacken und Helmen stehen im Schnee, mit Schneemobilen im Hintergrund.
#
# target: Fünf Leute in Winterjacken und mit Helmen stehen im Schnee mit Schneemobilen im Hintergrund.
#
#
# Example 5:
#
# input_text: translate English to German: People are fixing the roof of a house.
#
# prediction: Die Leute fixieren das Dach eines Hauses.
#
# target: Leute Reparieren das Dach eines Hauses.
#
| bsd-3-clause | c74eee3c62ede11e71b7ff4ca9b28870 | 40.673913 | 147 | 0.672284 | 3.463655 | false | false | false | false |
pytorch/text | benchmark/benchmark_bert_tokenizer.py | 1 | 1773 | from argparse import ArgumentParser
from benchmark.utils import Timer
from tokenizers import Tokenizer as hf_tokenizer_lib
from torchtext.datasets import EnWik9
from torchtext.transforms import BERTTokenizer as tt_bert_tokenizer
from transformers import BertTokenizer as hf_bert_tokenizer_slow
VOCAB_FILE = "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt"
def benchmark_bert_tokenizer(args):
tt_tokenizer = tt_bert_tokenizer(VOCAB_FILE, return_tokens=True)
hf_tokenizer_slow = hf_bert_tokenizer_slow.from_pretrained("bert-base-uncased")
hf_tokenizer_fast = hf_tokenizer_lib.from_pretrained("bert-base-uncased")
dp = EnWik9().header(args.num_samples).batch(args.batch_size)
samples = list(dp)
with Timer("Running TorchText BERT Tokenizer on non-batched input"):
for batch in samples:
for s in batch:
tt_tokenizer(s)
with Timer("Running HF BERT Tokenizer (slow) on non-batched input"):
for batch in samples:
for s in batch:
hf_tokenizer_slow.tokenize(s)
with Timer("Running HF BERT Tokenizer (fast) on non-batched input"):
for batch in samples:
for s in batch:
hf_tokenizer_fast.encode(s)
with Timer("Running TorchText BERT Tokenizer on batched input"):
for batch in samples:
tt_tokenizer(batch)
with Timer("Running HF BERT Tokenizer (fast) on batched input"):
for batch in samples:
hf_tokenizer_fast.encode_batch(batch)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--num-samples", default=10000, type=int)
parser.add_argument("--batch-size", default=100, type=int)
benchmark_bert_tokenizer(parser.parse_args())
| bsd-3-clause | 92509feae1bf56268bffb7294e078ed4 | 35.183673 | 83 | 0.684715 | 3.618367 | false | false | false | false |
pytorch/text | torchtext/datasets/yelpreviewpolarity.py | 1 | 3186 | import os
from functools import partial
from typing import Union, Tuple
from torchdata.datapipes.iter import FileOpener, IterableWrapper
from torchtext._download_hooks import GDriveReader
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import (
_wrap_split_argument,
_create_dataset_directory,
)
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbNUpYQ2N3SGlFaDg"
MD5 = "620c8ae4bd5a150b730f1ba9a7c6a4d3"
NUM_LINES = {
"train": 560000,
"test": 38000,
}
_PATH = "yelp_review_polarity_csv.tar.gz"
DATASET_NAME = "YelpReviewPolarity"
_EXTRACTED_FILES = {
"train": os.path.join("yelp_review_polarity_csv", "train.csv"),
"test": os.path.join("yelp_review_polarity_csv", "test.csv"),
}
def _filepath_fn(root, _=None):
return os.path.join(root, _PATH)
def _extracted_filepath_fn(root, split, _=None):
return os.path.join(root, _EXTRACTED_FILES[split])
def _filter_fn(split, x):
return _EXTRACTED_FILES[split] in x[0]
def _modify_res(t):
return int(t[0]), " ".join(t[1:])
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def YelpReviewPolarity(root: str, split: Union[Tuple[str], str]):
"""YelpReviewPolarity Dataset
.. warning::
        Using datapipes is still currently subject to a few caveats. If you wish
to use this dataset with shuffling, multi-processing, or distributed
learning, please see :ref:`this note <datapipes_warnings>` for further
instructions.
For additional details refer to https://arxiv.org/abs/1509.01626
Number of lines per split:
- train: 560000
- test: 38000
Args:
root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)
:returns: DataPipe that yields tuple of label (1 to 2) and text containing the review
:rtype: (int, str)
"""
if not is_module_available("torchdata"):
raise ModuleNotFoundError(
"Package `torchdata` not found. Please install following instructions at https://github.com/pytorch/data"
)
url_dp = IterableWrapper([URL])
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=partial(_filepath_fn, root),
hash_dict={_filepath_fn(root): MD5},
hash_type="md5",
)
cache_compressed_dp = GDriveReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_extracted_filepath_fn, root, split))
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b")
cache_decompressed_dp = cache_decompressed_dp.load_from_tar()
cache_decompressed_dp = cache_decompressed_dp.filter(partial(_filter_fn, split))
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
return data_dp.parse_csv().map(_modify_res).shuffle().set_shuffle(False).sharding_filter()
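# Minimal usage sketch (assumes the download and caching above succeed); each
# item is a (label, review_text) pair as described in the docstring:
#
#     train_dp = YelpReviewPolarity(split="train")
#     label, text = next(iter(train_dp))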
| bsd-3-clause | e48fa498b662e89d3285bf40a4fe1dd5 | 32.536842 | 119 | 0.696798 | 3.284536 | false | true | false | false |
pytorch/text | torchtext/prototype/vocab_factory.py | 1 | 3001 | from typing import Callable, Optional
import torch
from torchtext._torchtext import (
_build_vocab_from_text_file,
_build_vocab_from_text_file_using_python_tokenizer,
_load_vocab_from_file,
)
from torchtext.vocab import Vocab
__all__ = [
"build_vocab_from_text_file",
"load_vocab_from_file",
]
def build_vocab_from_text_file(
file_path: str, tokenizer: Optional[Callable] = None, min_freq: int = 1, num_cpus: Optional[int] = 4
) -> Vocab:
r"""Create a `Vocab` object from a raw text file.
The `file_path` can contain any raw text. This function applies a generic JITed tokenizer in
parallel to the text.
Args:
        file_path: Path to the raw text file to read data from.
        tokenizer: A Python callable to split an input sentence into tokens. It can also be a JIT'd module.
            By default, the function tokenizes with Python's str.split().
min_freq: The minimum frequency needed to include a token in the vocabulary.
        num_cpus: The number of CPUs to use when building the vocab from file. It is ignored when the tokenizer is not torch scripted (JIT'd).
Returns:
torchtext.vocab.Vocab: a `Vocab` object.
Examples:
>>> from torchtext.experimental.vocab_factory import build_vocab_from_text_file
>>> v = build_vocab_from_text_file('vocab.txt') # using python split function as tokenizer
>>> #using JIT'd tokenizer
>>> from torchtext.experimental.transforms import basic_english_normalize
        >>> tokenizer = basic_english_normalize()
>>> jit_tokenizer = torch.jit.script(tokenizer)
>>> v = build_vocab_from_text_file('vocab.txt', jit_tokenizer, num_cpus = 4)
"""
if not tokenizer:
def tokenizer(x):
return x.split()
if isinstance(tokenizer, torch.jit.ScriptModule) or isinstance(tokenizer, torch.jit.ScriptFunction):
vocab_obj = _build_vocab_from_text_file(file_path, min_freq, num_cpus, tokenizer)
else:
vocab_obj = _build_vocab_from_text_file_using_python_tokenizer(file_path, min_freq, tokenizer)
return Vocab(vocab_obj)
def load_vocab_from_file(file_path: str, min_freq: int = 1, num_cpus: int = 4) -> Vocab:
r"""Create a `Vocab` object from a text file.
The `file_path` should contain tokens separated by new lines.
Format for txt file:
token1
token2
...
token_n
Args:
        file_path: Path to the text file to read data from.
min_freq: The minimum frequency needed to include a token in the vocabulary.
        num_cpus: The number of CPUs to use when loading the vocab from file.
Returns:
torchtext.vocab.Vocab: a `Vocab` object.
Examples:
>>> from torchtext.vocab import load_vocab_from_file
>>> v = load_vocab_from_file('vocab.txt')
"""
vocab_obj = _load_vocab_from_file(file_path, min_freq, num_cpus)
return Vocab(vocab_obj)
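# Usage sketch (the file name is illustrative); the returned Vocab maps tokens
# to indices via __getitem__:
#
#     v = load_vocab_from_file("vocab.txt")
#     idx = v["the"]  # assumes "the" occurs in vocab.txt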
| bsd-3-clause | 3f81ef3558cf4e7513ee52d03f43945b | 36.987342 | 143 | 0.664445 | 3.72795 | false | false | false | false |
pytorch/text | test/torchtext_unittest/datasets/test_cnndm.py | 1 | 3647 | import hashlib
import os
import tarfile
from collections import defaultdict
from unittest.mock import patch
from parameterized import parameterized
from torchtext.datasets import CNNDM
from ..common.case_utils import TempDirMixin, zip_equal, get_random_unicode
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
base_dir = os.path.join(root_dir, "CNNDM")
temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir")
os.makedirs(temp_dataset_dir, exist_ok=True)
seed = 1
mocked_data = defaultdict(list)
for source in ["cnn", "dailymail"]:
source_dir = os.path.join(temp_dataset_dir, source, "stories")
os.makedirs(source_dir, exist_ok=True)
for split in ["train", "val", "test"]:
stories = []
for i in range(5):
url = "_".join([source, split, str(i)])
h = hashlib.sha1()
h.update(url.encode())
filename = h.hexdigest() + ".story"
txt_file = os.path.join(source_dir, filename)
with open(txt_file, "w", encoding=("utf-8")) as f:
article = get_random_unicode(seed) + "."
abstract = get_random_unicode(seed + 1) + "."
dataset_line = (article, abstract)
f.writelines([article, "\n@highlight\n", abstract])
stories.append((txt_file, dataset_line))
seed += 2
# append stories to correct dataset split, must be in lexicographic order of filenames per dataset
stories.sort(key=lambda x: x[0])
mocked_data[split] += [t[1] for t in stories]
compressed_dataset_path = os.path.join(base_dir, f"{source}_stories.tgz")
# create zip file from dataset folder
with tarfile.open(compressed_dataset_path, "w:gz") as tar:
tar.add(os.path.join(temp_dataset_dir, source), arcname=source)
return mocked_data
class TestCNNDM(TempDirMixin, TorchtextTestCase):
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.root_dir = cls.get_base_temp_dir()
cls.samples = _get_mock_dataset(os.path.join(cls.root_dir, "datasets"))
cls.patcher = patch("torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True)
cls.patcher.start()
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
super().tearDownClass()
def _mock_split_list(source, split):
story_fnames = []
for i in range(5):
url = "_".join([source, split, str(i)])
h = hashlib.sha1()
h.update(url.encode())
filename = h.hexdigest() + ".story"
story_fnames.append(filename)
return story_fnames
@parameterized.expand(["train", "val", "test"])
@patch("torchtext.datasets.cnndm._get_split_list", _mock_split_list)
def test_cnndm(self, split):
dataset = CNNDM(root=self.root_dir, split=split)
samples = list(dataset)
expected_samples = self.samples[split]
self.assertEqual(expected_samples, samples)
@parameterized.expand(["train", "val", "test"])
@patch("torchtext.datasets.cnndm._get_split_list", _mock_split_list)
def test_cnndm_split_argument(self, split):
dataset1 = CNNDM(root=self.root_dir, split=split)
(dataset2,) = CNNDM(root=self.root_dir, split=(split,))
for d1, d2 in zip_equal(dataset1, dataset2):
self.assertEqual(d1, d2)
| bsd-3-clause | b265d0cb46ddbf810d70489363286155 | 35.838384 | 110 | 0.603784 | 3.647 | false | true | false | false |
pallets/click | src/click/globals.py | 1 | 1961 | import typing as t
from threading import local
if t.TYPE_CHECKING:
import typing_extensions as te
from .core import Context
_local = local()
@t.overload
def get_current_context(silent: "te.Literal[False]" = False) -> "Context":
...
@t.overload
def get_current_context(silent: bool = ...) -> t.Optional["Context"]:
...
def get_current_context(silent: bool = False) -> t.Optional["Context"]:
"""Returns the current click context. This can be used as a way to
access the current context object from anywhere. This is a more implicit
alternative to the :func:`pass_context` decorator. This function is
primarily useful for helpers such as :func:`echo` which might be
interested in changing its behavior based on the current context.
To push the current context, :meth:`Context.scope` can be used.
.. versionadded:: 5.0
:param silent: if set to `True` the return value is `None` if no context
is available. The default behavior is to raise a
:exc:`RuntimeError`.
"""
try:
return t.cast("Context", _local.stack[-1])
except (AttributeError, IndexError) as e:
if not silent:
raise RuntimeError("There is no active click context.") from e
return None
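# Illustrative sketch (not part of this module): a command that fetches the
# active context implicitly instead of receiving it via @pass_context:
#
#     import click
#
#     @click.command()
#     def sync():
#         ctx = click.get_current_context()
#         click.echo(f"invoked as {ctx.info_name}")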
def push_context(ctx: "Context") -> None:
"""Pushes a new context to the current stack."""
_local.__dict__.setdefault("stack", []).append(ctx)
def pop_context() -> None:
"""Removes the top level from the stack."""
_local.stack.pop()
def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]:
"""Internal helper to get the default value of the color flag. If a
value is passed it's returned unchanged, otherwise it's looked up from
the current context.
"""
if color is not None:
return color
ctx = get_current_context(silent=True)
if ctx is not None:
return ctx.color
return None
| bsd-3-clause | 3ece3cdc0ed547ae084dd6e6c3bc4eec | 27.838235 | 78 | 0.654768 | 4.002041 | false | false | false | false |
pytorch/text | examples/vocab/fairseq_vocab.py | 1 | 2387 | from collections import OrderedDict
from typing import Dict, List, Optional
from fairseq.data.dictionary import Dictionary
from torchtext.vocab import Vocab
def build_fairseq_vocab(
vocab_file: str,
dictionary_class: Dictionary = Dictionary,
special_token_replacements: Dict[str, str] = None,
unk_token: str = "<unk>",
max_vocab: int = -1,
min_count: int = -1,
tokens_to_add: Optional[List[str]] = None,
):
"""Function builds a torchtext Vocab for models pre-trained using Fairseq
modules.
The dictionary class can take any Fairseq Dictionary class and is
used to load the vocab file.
"""
if not special_token_replacements:
special_token_replacements = {
"<pad>": "__PAD__",
"<s>": "__BEGIN_OF_SENTENCE__",
"</s>": "__END_OF_SENTENCE__",
"<unk>": "__UNKNOWN__",
"<mask>": "__MASK__",
}
unk_replacement = (
special_token_replacements[unk_token] if unk_token in special_token_replacements else unk_token
)
    special_tokens_to_remove = [special_pair[0] for special_pair in special_token_replacements.items()]
    special_tokens_to_add = tuple(
        special_pair[1] for special_pair in special_token_replacements.items() if special_pair[0] != unk_token
    )
with open(vocab_file) as f:
dictionary = dictionary_class.load(f)
# finalize will sort the dict based on frequency so only do this if
# a min_count or max_vocab size is specified
if min_count > 0 or max_vocab > 0:
dictionary.finalize(threshold=min_count, nwords=max_vocab, padding_factor=1)
if tokens_to_add:
for token in tokens_to_add:
dictionary.add_symbol(token)
dictionary_items = list(zip(dictionary.symbols, dictionary.count))
ordered_dict = OrderedDict()
# add special tokens to beginning of ordered_dict
for s in special_tokens_to_add:
ordered_dict[s] = 1
# add all other tokens from dictionary_items
for token, freq in dictionary_items:
ordered_dict[token] = freq
# remove special_tokens_to_remove from dict
for s in special_tokens_to_remove:
if s in ordered_dict:
del ordered_dict[s]
    return Vocab(ordered_dict, unk_token=unk_replacement)
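# Hypothetical usage sketch (the dictionary path is illustrative):
#
#     vocab = build_fairseq_vocab("dict.txt", min_count=5, tokens_to_add=["<extra_token>"])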
| bsd-3-clause | f54419c10419c63ec032477f0cf75fc5 | 35.166667 | 107 | 0.620444 | 3.913115 | false | false | false | false |
pallets/click | tests/test_imports.py | 1 | 1374 | import json
import subprocess
import sys
from click._compat import WIN
IMPORT_TEST = b"""\
import builtins
found_imports = set()
real_import = builtins.__import__
import sys
def tracking_import(module, locals=None, globals=None, fromlist=None,
level=0):
rv = real_import(module, locals, globals, fromlist, level)
if globals and globals['__name__'].startswith('click') and level == 0:
found_imports.add(module)
return rv
builtins.__import__ = tracking_import
import click
rv = list(found_imports)
import json
click.echo(json.dumps(rv))
"""
ALLOWED_IMPORTS = {
"weakref",
"os",
"struct",
"collections",
"sys",
"contextlib",
"functools",
"stat",
"re",
"codecs",
"inspect",
"itertools",
"io",
"threading",
"errno",
"fcntl",
"datetime",
"enum",
"typing",
"types",
"gettext",
}
if WIN:
ALLOWED_IMPORTS.update(["ctypes", "ctypes.wintypes", "msvcrt", "time"])
def test_light_imports():
c = subprocess.Popen(
[sys.executable, "-"], stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
rv = c.communicate(IMPORT_TEST)[0]
rv = rv.decode("utf-8")
imported = json.loads(rv)
for module in imported:
if module == "click" or module.startswith("click."):
continue
assert module in ALLOWED_IMPORTS
| bsd-3-clause | ca72938f3020ac427ecf835bcbeb08ab | 19.205882 | 76 | 0.606259 | 3.587467 | false | false | false | false |
scipy/scipy | scipy/fftpack/_helper.py | 10 | 3354 | import operator
from numpy.fft.helper import fftshift, ifftshift, fftfreq
import scipy.fft._pocketfft.helper as _helper
import numpy as np
__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']
def rfftfreq(n, d=1.0):
"""DFT sample frequencies (for usage with rfft, irfft).
The returned float array contains the frequency bins in
cycles/unit (with zero at the start) given a window length `n` and a
sample spacing `d`::
f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even
f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing. Default is 1.
Returns
-------
out : ndarray
The array of length `n`, containing the sample frequencies.
Examples
--------
>>> import numpy as np
>>> from scipy import fftpack
>>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
>>> sig_fft = fftpack.rfft(sig)
>>> n = sig_fft.size
>>> timestep = 0.1
>>> freq = fftpack.rfftfreq(n, d=timestep)
>>> freq
array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ])
"""
n = operator.index(n)
if n < 0:
raise ValueError("n = %s is not valid. "
"n must be a nonnegative integer." % n)
return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d)
def next_fast_len(target):
"""
Find the next fast size of input data to `fft`, for zero-padding, etc.
SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this
returns the next composite of the prime factors 2, 3, and 5 which is
greater than or equal to `target`. (These are also known as 5-smooth
numbers, regular numbers, or Hamming numbers.)
Parameters
----------
target : int
Length to start searching from. Must be a positive integer.
Returns
-------
out : int
The first 5-smooth number greater than or equal to `target`.
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
On a particular machine, an FFT of prime length takes 133 ms:
>>> from scipy import fftpack
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> min_len = 10007 # prime length is worst case for speed
>>> a = rng.standard_normal(min_len)
>>> b = fftpack.fft(a)
Zero-padding to the next 5-smooth length reduces computation time to
211 us, a speedup of 630 times:
>>> fftpack.next_fast_len(min_len)
10125
>>> b = fftpack.fft(a, 10125)
Rounding up to the next power of 2 is not optimal, taking 367 us to
compute, 1.7 times as long as the 5-smooth size:
>>> b = fftpack.fft(a, 16384)
"""
# Real transforms use regular sizes so this is backwards compatible
return _helper.good_size(target, True)
def _good_shape(x, shape, axes):
"""Ensure that shape argument is valid for scipy.fftpack
scipy.fftpack does not support len(shape) < x.ndim when axes is not given.
"""
if shape is not None and axes is None:
shape = _helper._iterable_of_int(shape, 'shape')
if len(shape) != np.ndim(x):
raise ValueError("when given, axes and shape arguments"
" have to be of the same length")
return shape
| bsd-3-clause | 74cb0333fc4267156f23d291818008ff | 28.946429 | 78 | 0.592725 | 3.41896 | false | false | false | false |
scipy/scipy | scipy/odr/_add_newdocs.py | 24 | 1090 | from numpy import add_newdoc
add_newdoc('scipy.odr', 'odr',
"""
odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, full_output=0)
Low-level function for ODR.
See Also
--------
ODR : The ODR class gathers all information and coordinates the running of the main fitting routine.
Model : The Model class stores information about the function you wish to fit.
Data : The data to fit.
RealData : Data with weights as actual std. dev.s and/or covariances.
Notes
-----
This is a function performing the same operation as the `ODR`,
`Model`, and `Data` classes together. The parameters of this
function are explained in the class documentation.
""")
add_newdoc('scipy.odr.__odrpack', '_set_exceptions',
"""
_set_exceptions(odr_error, odr_stop)
Internal function: set exception classes.
""")
| bsd-3-clause | ea77bf3fbce1dc8f08fc4af1ae72f77b | 35.333333 | 292 | 0.673394 | 3.234421 | false | false | false | false |
scipy/scipy | benchmarks/benchmarks/common.py | 18 | 3954 | """
Airspeed Velocity benchmark utilities
"""
import sys
import os
import re
import time
import textwrap
import subprocess
import itertools
import random
class Benchmark:
"""
Base class with sensible options
"""
pass
def is_xslow():
try:
return int(os.environ.get('SCIPY_XSLOW', '0'))
except ValueError:
return False
class LimitedParamBenchmark(Benchmark):
"""
    Limits parameter combinations to `num_param_combinations` choices, chosen
pseudo-randomly with fixed seed.
Raises NotImplementedError (skip) if not in active set.
"""
num_param_combinations = 0
def setup(self, *args, **kwargs):
slow = is_xslow()
if slow:
# no need to skip
return
param_seed = kwargs.pop('param_seed', None)
if param_seed is None:
param_seed = 1
params = kwargs.pop('params', None)
if params is None:
params = self.params
num_param_combinations = kwargs.pop('num_param_combinations', None)
if num_param_combinations is None:
num_param_combinations = self.num_param_combinations
all_choices = list(itertools.product(*params))
rng = random.Random(param_seed)
rng.shuffle(all_choices)
active_choices = all_choices[:num_param_combinations]
if args not in active_choices:
raise NotImplementedError("skipped")
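# Illustrative subclass sketch (the benchmark name and parameters are made up):
# an ASV benchmark with a large parameter grid restricted to 8 pseudo-randomly
# selected combinations unless SCIPY_XSLOW is set.
#
#     class MySolverBench(LimitedParamBenchmark):
#         params = [[10, 100, 1000], ['dense', 'sparse']]
#         param_names = ['n', 'format']
#         num_param_combinations = 8
#
#         def setup(self, n, fmt):
#             super().setup(n, fmt)
#             ...  # expensive setup only runs for active combinations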
def run_monitored(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : float
Peak memory usage (rough estimate only) in bytes
"""
if not sys.platform.startswith('linux'):
raise RuntimeError("Peak memory monitoring only works on Linux")
code = textwrap.dedent(code)
process = subprocess.Popen([sys.executable, '-c', code])
peak_memusage = -1
start = time.time()
while True:
ret = process.poll()
if ret is not None:
break
with open('/proc/%d/status' % process.pid, 'r') as f:
procdata = f.read()
m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
if m is not None:
memusage = float(m.group(1)) * 1e3
peak_memusage = max(memusage, peak_memusage)
time.sleep(0.01)
process.wait()
duration = time.time() - start
if process.returncode != 0:
raise AssertionError("Running failed:\n%s" % code)
return duration, peak_memusage
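# Example call (the code string is arbitrary):
#
#     duration, peak = run_monitored("import numpy as np; x = np.ones(10**7)")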
def get_mem_info():
"""Get information about available memory"""
if not sys.platform.startswith('linux'):
raise RuntimeError("Memory information implemented only for Linux")
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
return info
def set_mem_rlimit(max_mem=None):
"""
Set address space rlimit
"""
import resource
if max_mem is None:
mem_info = get_mem_info()
max_mem = int(mem_info['memtotal'] * 0.7)
cur_limit = resource.getrlimit(resource.RLIMIT_AS)
if cur_limit[0] > 0:
max_mem = min(max_mem, cur_limit[0])
resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
def with_attributes(**attrs):
def decorator(func):
for key, value in attrs.items():
setattr(func, key, value)
return func
return decorator
class safe_import:
def __enter__(self):
self.error = False
return self
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.error = True
suppress = not (
os.getenv('SCIPY_ALLOW_BENCH_IMPORT_ERRORS', '1').lower() in
('0', 'false') or not issubclass(type_, ImportError))
return suppress
| bsd-3-clause | bd3f207be3c0fd0fd689717cf8cb7b79 | 23.867925 | 76 | 0.595346 | 3.903258 | false | false | false | false |
scipy/scipy | scipy/stats/tests/test_mstats_extras.py | 10 | 6066 | import numpy as np
import numpy.ma as ma
import scipy.stats.mstats as ms
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_allclose)
def test_compare_medians_ms():
x = np.arange(7)
y = x + 10
assert_almost_equal(ms.compare_medians_ms(x, y), 0)
y2 = np.linspace(0, 1, num=10)
assert_almost_equal(ms.compare_medians_ms(x, y2), 0.017116406778)
def test_hdmedian():
# 1-D array
x = ma.arange(11)
assert_allclose(ms.hdmedian(x), 5, rtol=1e-14)
x.mask = ma.make_mask(x)
x.mask[:7] = False
assert_allclose(ms.hdmedian(x), 3, rtol=1e-14)
# Check that `var` keyword returns a value. TODO: check whether returned
# value is actually correct.
assert_(ms.hdmedian(x, var=True).size == 2)
# 2-D array
x2 = ma.arange(22).reshape((11, 2))
assert_allclose(ms.hdmedian(x2, axis=0), [10, 11])
x2.mask = ma.make_mask(x2)
x2.mask[:7, :] = False
assert_allclose(ms.hdmedian(x2, axis=0), [6, 7])
def test_rsh():
np.random.seed(132345)
x = np.random.randn(100)
res = ms.rsh(x)
# Just a sanity check that the code runs and output shape is correct.
# TODO: check that implementation is correct.
assert_(res.shape == x.shape)
# Check points keyword
res = ms.rsh(x, points=[0, 1.])
assert_(res.size == 2)
def test_mjci():
    # Tests the Maritz-Jarrett estimator
data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
296,299,306,376,428,515,666,1310,2611])
assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5)
def test_trimmed_mean_ci():
# Tests the confidence intervals of the trimmed mean.
data = ma.array([545,555,558,572,575,576,578,580,
594,605,635,651,653,661,666])
assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1)
assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1),
[561.8, 630.6])
def test_idealfourths():
# Tests ideal-fourths
test = np.arange(100)
assert_almost_equal(np.asarray(ms.idealfourths(test)),
[24.416667,74.583333],6)
test_2D = test.repeat(3).reshape(-1,3)
assert_almost_equal(ms.idealfourths(test_2D, axis=0),
[[24.416667,24.416667,24.416667],
[74.583333,74.583333,74.583333]],6)
assert_almost_equal(ms.idealfourths(test_2D, axis=1),
test.repeat(2).reshape(-1,2))
test = [0, 0]
_result = ms.idealfourths(test)
assert_(np.isnan(_result).all())
class TestQuantiles:
data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]
def test_hdquantiles(self):
data = self.data
assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
[0.006514031, 0.995309248])
hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])
data = np.array(data).reshape(10,10)
hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
assert_almost_equal(hdq[...,0],
ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
assert_almost_equal(hdq[...,-1],
ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))
def test_hdquantiles_sd(self):
# Standard deviation is a jackknife estimator, so we can check if
# the efficient version (hdquantiles_sd) matches a rudimentary,
# but clear version here.
hd_std_errs = ms.hdquantiles_sd(self.data)
        # jackknife standard error, Introduction to the Bootstrap Eq. 11.5
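        # i.e. se_jack = sqrt((n-1)/n * sum_i (theta_i - theta_bar)**2), with
        # theta_i the hdquantiles of the leave-one-out samples built below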
n = len(self.data)
jdata = np.broadcast_to(self.data, (n, n))
jselector = np.logical_not(np.eye(n)) # leave out one sample each row
jdata = jdata[jselector].reshape(n, n-1)
jdist = ms.hdquantiles(jdata, axis=1)
jdist_mean = np.mean(jdist, axis=0)
jstd = ((n-1)/n * np.sum((jdist - jdist_mean)**2, axis=0))**.5
assert_almost_equal(hd_std_errs, jstd)
# Test actual values for good measure
assert_almost_equal(hd_std_errs, [0.0379258, 0.0380656, 0.0380013])
two_data_points = ms.hdquantiles_sd([1, 2])
assert_almost_equal(two_data_points, [0.5, 0.5, 0.5])
def test_mquantiles_cimj(self):
# Only test that code runs, implementation not checked for correctness
ci_lower, ci_upper = ms.mquantiles_cimj(self.data)
assert_(ci_lower.size == ci_upper.size == 3)
| bsd-3-clause | a195ef98c572595e42eb79552bc6aee8 | 39.44 | 82 | 0.628586 | 2.564905 | false | true | false | false |
scipy/scipy | scipy/spatial/tests/test_qhull.py | 9 | 44147 | import os
import copy
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_, assert_allclose, assert_array_equal)
import pytest
from pytest import raises as assert_raises
import scipy.spatial._qhull as qhull
from scipy.spatial import cKDTree as KDTree
from scipy.spatial import Voronoi
import itertools
def sorted_tuple(x):
return tuple(sorted(x))
def sorted_unique_tuple(x):
return tuple(np.unique(x))
def assert_unordered_tuple_list_equal(a, b, tpl=tuple):
if isinstance(a, np.ndarray):
a = a.tolist()
if isinstance(b, np.ndarray):
b = b.tolist()
a = list(map(tpl, a))
a.sort()
b = list(map(tpl, b))
b.sort()
assert_equal(a, b)
np.random.seed(1234)
points = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)]
pathological_data_1 = np.array([
[-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79],
[-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36],
[-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57],
[-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57],
[-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79],
[-1.57,-1.57], [-1.57,0.0], [-1.57,1.57], [-1.57,-3.14],
[-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57],
[-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79],
[-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14],
[0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0],
[0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14],
[0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79],
[0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57],
[1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14],
[1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79],
[1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57],
[2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57],
[2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36],
[3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79],
[3.14,1.57], [3.14,2.36], [3.14,3.14],
])
pathological_data_2 = np.array([
[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 0], [0, 1],
[1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1],
])
bug_2850_chunks = [np.random.rand(10, 2),
np.array([[0,0], [0,1], [1,0], [1,1]]) # add corners
]
# same with some additional chunks
bug_2850_chunks_2 = (bug_2850_chunks +
[np.random.rand(10, 2),
0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])])
DATASETS = {
'some-points': np.asarray(points),
'random-2d': np.random.rand(30, 2),
'random-3d': np.random.rand(30, 3),
'random-4d': np.random.rand(30, 4),
'random-5d': np.random.rand(30, 5),
'random-6d': np.random.rand(10, 6),
'random-7d': np.random.rand(10, 7),
'random-8d': np.random.rand(10, 8),
'pathological-1': pathological_data_1,
'pathological-2': pathological_data_2
}
INCREMENTAL_DATASETS = {
'bug-2850': (bug_2850_chunks, None),
'bug-2850-2': (bug_2850_chunks_2, None),
}
def _add_inc_data(name, chunksize):
"""
Generate incremental datasets from basic data sets
"""
points = DATASETS[name]
ndim = points.shape[1]
opts = None
nmin = ndim + 2
if name == 'some-points':
# since Qz is not allowed, use QJ
opts = 'QJ Pp'
elif name == 'pathological-1':
# include enough points so that we get different x-coordinates
nmin = 12
chunks = [points[:nmin]]
for j in range(nmin, len(points), chunksize):
chunks.append(points[j:j+chunksize])
new_name = "%s-chunk-%d" % (name, chunksize)
assert new_name not in INCREMENTAL_DATASETS
INCREMENTAL_DATASETS[new_name] = (chunks, opts)
for name in DATASETS:
for chunksize in 1, 4, 16:
_add_inc_data(name, chunksize)
class Test_Qhull:
def test_swapping(self):
# Check that Qhull state swapping works
x = qhull._Qhull(b'v',
np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]),
b'Qz')
xd = copy.deepcopy(x.get_voronoi_diagram())
y = qhull._Qhull(b'v',
np.array([[0,0],[0,1],[1,0],[1,2.]]),
b'Qz')
yd = copy.deepcopy(y.get_voronoi_diagram())
xd2 = copy.deepcopy(x.get_voronoi_diagram())
x.close()
yd2 = copy.deepcopy(y.get_voronoi_diagram())
y.close()
assert_raises(RuntimeError, x.get_voronoi_diagram)
assert_raises(RuntimeError, y.get_voronoi_diagram)
assert_allclose(xd[0], xd2[0])
assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple)
assert_array_equal(xd[4], xd2[4])
assert_allclose(yd[0], yd2[0])
assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple)
assert_array_equal(yd[4], yd2[4])
x.close()
assert_raises(RuntimeError, x.get_voronoi_diagram)
y.close()
assert_raises(RuntimeError, y.get_voronoi_diagram)
def test_issue_8051(self):
points = np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2],[2, 0], [2, 1], [2, 2]])
Voronoi(points)
class TestUtilities:
"""
Check that utility functions work.
"""
def test_find_simplex(self):
# Simple check that simplex finding works
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
# +---+
# |\ 0|
# | \ |
# |1 \|
# +---+
assert_equal(tri.simplices, [[1, 3, 2], [3, 1, 0]])
for p in [(0.25, 0.25, 1),
(0.75, 0.75, 0),
(0.3, 0.2, 1)]:
i = tri.find_simplex(p[:2])
assert_equal(i, p[2], err_msg='%r' % (p,))
j = qhull.tsearch(tri, p[:2])
assert_equal(i, j)
def test_plane_distance(self):
# Compare plane distance from hyperplane equations obtained from Qhull
# to manually computed plane equations
x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127),
(0.99440079, 0.45182168)], dtype=np.double)
p = np.array([0.99966555, 0.15685619], dtype=np.double)
tri = qhull.Delaunay(x)
z = tri.lift_points(x)
pz = tri.lift_points(p)
dist = tri.plane_distance(p)
for j, v in enumerate(tri.simplices):
x1 = z[v[0]]
x2 = z[v[1]]
x3 = z[v[2]]
n = np.cross(x1 - x3, x2 - x3)
n /= np.sqrt(np.dot(n, n))
n *= -np.sign(n[2])
d = np.dot(n, pz - x3)
assert_almost_equal(dist[j], d)
def test_convex_hull(self):
# Simple check that the convex hull seems to works
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
# +---+
# |\ 0|
# | \ |
# |1 \|
# +---+
assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]])
def test_volume_area(self):
#Basic check that we get back the correct volume and area for a cube
points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
hull = qhull.ConvexHull(points)
assert_allclose(hull.volume, 1., rtol=1e-14,
err_msg="Volume of cube is incorrect")
assert_allclose(hull.area, 6., rtol=1e-14,
err_msg="Area of cube is incorrect")
def test_random_volume_area(self):
#Test that the results for a random 10-point convex are
#coherent with the output of qconvex Qt s FA
points = np.array([(0.362568364506, 0.472712355305, 0.347003084477),
(0.733731893414, 0.634480295684, 0.950513180209),
(0.511239955611, 0.876839441267, 0.418047827863),
(0.0765906233393, 0.527373281342, 0.6509863541),
(0.146694972056, 0.596725793348, 0.894860986685),
(0.513808585741, 0.069576205858, 0.530890338876),
(0.512343805118, 0.663537132612, 0.037689295973),
(0.47282965018, 0.462176697655, 0.14061843691),
(0.240584597123, 0.778660020591, 0.722913476339),
(0.951271745935, 0.967000673944, 0.890661319684)])
hull = qhull.ConvexHull(points)
assert_allclose(hull.volume, 0.14562013, rtol=1e-07,
err_msg="Volume of random polyhedron is incorrect")
assert_allclose(hull.area, 1.6670425, rtol=1e-07,
err_msg="Area of random polyhedron is incorrect")
def test_incremental_volume_area_random_input(self):
"""Test that incremental mode gives the same volume/area as
non-incremental mode and incremental mode with restart"""
nr_points = 20
dim = 3
points = np.random.random((nr_points, dim))
inc_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True)
inc_restart_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True)
for i in range(dim+1, nr_points):
hull = qhull.ConvexHull(points[:i+1, :])
inc_hull.add_points(points[i:i+1, :])
inc_restart_hull.add_points(points[i:i+1, :], restart=True)
assert_allclose(hull.volume, inc_hull.volume, rtol=1e-7)
assert_allclose(hull.volume, inc_restart_hull.volume, rtol=1e-7)
assert_allclose(hull.area, inc_hull.area, rtol=1e-7)
assert_allclose(hull.area, inc_restart_hull.area, rtol=1e-7)
def _check_barycentric_transforms(self, tri, err_msg="",
unit_cube=False,
unit_cube_tol=0):
"""Check that a triangulation has reasonable barycentric transforms"""
vertices = tri.points[tri.simplices]
sc = 1/(tri.ndim + 1.0)
centroids = vertices.sum(axis=1) * sc
# Either: (i) the simplex has a `nan` barycentric transform,
# or, (ii) the centroid is in the simplex
def barycentric_transform(tr, x):
r = tr[:,-1,:]
Tinv = tr[:,:-1,:]
return np.einsum('ijk,ik->ij', Tinv, x - r)
eps = np.finfo(float).eps
c = barycentric_transform(tri.transform, centroids)
with np.errstate(invalid="ignore"):
ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1)
assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
# Invalid simplices must be (nearly) zero volume
q = vertices[:,:-1,:] - vertices[:,-1,None,:]
volume = np.array([np.linalg.det(q[k,:,:])
for k in range(tri.nsimplex)])
ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps))
assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
# Also, find_simplex for the centroid should end up in some
# simplex for the non-degenerate cases
j = tri.find_simplex(centroids)
ok = (j != -1) | np.isnan(tri.transform[:,0,0])
assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
if unit_cube:
# If in unit cube, no interior point should be marked out of hull
at_boundary = (centroids <= unit_cube_tol).any(axis=1)
at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1)
ok = (j != -1) | at_boundary
assert_(ok.all(), "%s %s" % (err_msg, np.nonzero(~ok)))
def test_degenerate_barycentric_transforms(self):
# The triangulation should not produce invalid barycentric
# transforms that stump the simplex finding
data = np.load(os.path.join(os.path.dirname(__file__), 'data',
'degenerate_pointset.npz'))
points = data['c']
data.close()
tri = qhull.Delaunay(points)
# Check that there are not too many invalid simplices
bad_count = np.isnan(tri.transform[:,0,0]).sum()
assert_(bad_count < 23, bad_count)
# Check the transforms
self._check_barycentric_transforms(tri)
@pytest.mark.slow
def test_more_barycentric_transforms(self):
# Triangulate some "nasty" grids
eps = np.finfo(float).eps
npoints = {2: 70, 3: 11, 4: 5, 5: 3}
for ndim in range(2, 6):
# Generate an uniform grid in n-d unit cube
x = np.linspace(0, 1, npoints[ndim])
grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T
err_msg = "ndim=%d" % ndim
# Check using regular grid
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True)
# Check with eps-perturbations
np.random.seed(1234)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=2*eps)
# Check with duplicated data
tri = qhull.Delaunay(np.r_[grid, grid])
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=2*eps)
class TestVertexNeighborVertices:
def _check(self, tri):
expected = [set() for j in range(tri.points.shape[0])]
for s in tri.simplices:
for a in s:
for b in s:
if a != b:
expected[a].add(b)
indptr, indices = tri.vertex_neighbor_vertices
got = [set(map(int, indices[indptr[j]:indptr[j+1]]))
for j in range(tri.points.shape[0])]
assert_equal(got, expected, err_msg="%r != %r" % (got, expected))
def test_triangle(self):
points = np.array([(0,0), (0,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
def test_rectangle(self):
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
def test_complicated(self):
points = np.array([(0,0), (0,1), (1,1), (1,0),
(0.5, 0.5), (0.9, 0.5)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
class TestDelaunay:
"""
Check that triangulation works.
"""
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Delaunay, masked_array)
def test_array_with_nans_fails(self):
points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.double)
assert_raises(ValueError, qhull.Delaunay, points_with_nan)
def test_nd_simplex(self):
# simple smoke test: triangulate a n-dimensional simplex
for nd in range(2, 8):
points = np.zeros((nd+1, nd))
for j in range(nd):
points[j,j] = 1.0
points[-1,:] = 1.0
tri = qhull.Delaunay(points)
tri.simplices.sort()
assert_equal(tri.simplices, np.arange(nd+1, dtype=int)[None, :])
assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:])
def test_2d_square(self):
# simple smoke test: 2d square
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
assert_equal(tri.simplices, [[1, 3, 2], [3, 1, 0]])
assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]])
def test_duplicate_points(self):
x = np.array([0, 1, 0, 1], dtype=np.float64)
y = np.array([0, 0, 1, 1], dtype=np.float64)
xp = np.r_[x, x]
yp = np.r_[y, y]
# shouldn't fail on duplicate points
qhull.Delaunay(np.c_[x, y])
qhull.Delaunay(np.c_[xp, yp])
def test_pathological(self):
# both should succeed
points = DATASETS['pathological-1']
tri = qhull.Delaunay(points)
assert_equal(tri.points[tri.simplices].max(), points.max())
assert_equal(tri.points[tri.simplices].min(), points.min())
points = DATASETS['pathological-2']
tri = qhull.Delaunay(points)
assert_equal(tri.points[tri.simplices].max(), points.max())
assert_equal(tri.points[tri.simplices].min(), points.min())
def test_joggle(self):
# Check that the option QJ indeed guarantees that all input points
# occur as vertices of the triangulation
points = np.random.rand(10, 2)
points = np.r_[points, points] # duplicate input data
tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp")
assert_array_equal(np.unique(tri.simplices.ravel()),
np.arange(len(points)))
def test_coplanar(self):
# Check that the coplanar point output option indeed works
points = np.random.rand(10, 2)
points = np.r_[points, points] # duplicate input data
tri = qhull.Delaunay(points)
assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2)
assert_(len(tri.coplanar) == len(points)//2)
assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2)
assert_(np.all(tri.vertex_to_simplex >= 0))
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
tri = qhull.Delaunay(points, furthest_site=True)
expected = np.array([(1, 4, 0), (4, 2, 0)]) # from Qhull
assert_array_equal(tri.simplices, expected)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the triangulation
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Delaunay(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Delaunay(points)
obj3 = qhull.Delaunay(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# Check that the incremental mode agrees with upfront mode
if name.startswith('pathological'):
# XXX: These produce valid but different triangulations.
# They look OK when plotted, but how to check them?
assert_array_equal(np.unique(obj.simplices.ravel()),
np.arange(points.shape[0]))
assert_array_equal(np.unique(obj2.simplices.ravel()),
np.arange(points.shape[0]))
else:
assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices,
tpl=sorted_tuple)
assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices,
tpl=sorted_tuple)
def test_vertices_deprecation(self):
tri = qhull.Delaunay([(0, 0), (0, 1), (1, 0)])
msg = ("Delaunay attribute 'vertices' is deprecated in favour of "
"'simplices' and will be removed in Scipy 1.11.0.")
with pytest.warns(DeprecationWarning, match=msg):
tri.vertices
def assert_hulls_equal(points, facets_1, facets_2):
# Check that two convex hulls constructed from the same point set
# are equal
facets_1 = set(map(sorted_tuple, facets_1))
facets_2 = set(map(sorted_tuple, facets_2))
if facets_1 != facets_2 and points.shape[1] == 2:
# The direct check fails for the pathological cases
# --- then the convex hull from Delaunay differs (due
# to rounding error etc.) from the hull computed
# otherwise, by the question whether (tricoplanar)
# points that lie almost exactly on the hull are
# included as vertices of the hull or not.
#
# So we check the result, and accept it if the Delaunay
# hull line segments are a subset of the usual hull.
eps = 1000 * np.finfo(float).eps
for a, b in facets_1:
for ap, bp in facets_2:
t = points[bp] - points[ap]
t /= np.linalg.norm(t) # tangent
n = np.array([-t[1], t[0]]) # normal
# check that the two line segments are parallel
# to the same line
c1 = np.dot(n, points[b] - points[ap])
c2 = np.dot(n, points[a] - points[ap])
if not np.allclose(np.dot(c1, n), 0):
continue
if not np.allclose(np.dot(c2, n), 0):
continue
# Check that the segment (a, b) is contained in (ap, bp)
c1 = np.dot(t, points[a] - points[ap])
c2 = np.dot(t, points[b] - points[ap])
c3 = np.dot(t, points[bp] - points[ap])
if c1 < -eps or c1 > c3 + eps:
continue
if c2 < -eps or c2 > c3 + eps:
continue
# OK:
break
else:
raise AssertionError("comparison fails")
# it was OK
return
assert_equal(facets_1, facets_2)
class TestConvexHull:
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.ConvexHull, masked_array)
def test_array_with_nans_fails(self):
points_with_nan = np.array([(0,0), (1,1), (2,np.nan)], dtype=np.double)
assert_raises(ValueError, qhull.ConvexHull, points_with_nan)
@pytest.mark.parametrize("name", sorted(DATASETS))
def test_hull_consistency_tri(self, name):
# Check that a convex hull returned by qhull in ndim
# and the hull constructed from ndim delaunay agree
points = DATASETS[name]
tri = qhull.Delaunay(points)
hull = qhull.ConvexHull(points)
assert_hulls_equal(points, tri.convex_hull, hull.simplices)
# Check that the hull extremes are as expected
if points.shape[1] == 2:
assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
else:
assert_equal(np.unique(hull.simplices), hull.vertices)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the convex hull
chunks, _ = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.ConvexHull(chunks[0], incremental=True)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.ConvexHull(points)
obj3 = qhull.ConvexHull(chunks[0], incremental=True)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# Check that the incremental mode agrees with upfront mode
assert_hulls_equal(points, obj.simplices, obj2.simplices)
assert_hulls_equal(points, obj.simplices, obj3.simplices)
def test_vertices_2d(self):
# The vertices should be in counterclockwise order in 2-D
np.random.seed(1234)
points = np.random.rand(30, 2)
hull = qhull.ConvexHull(points)
assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
# Check counterclockwiseness
x, y = hull.points[hull.vertices].T
angle = np.arctan2(y - y.mean(), x - x.mean())
assert_(np.all(np.diff(np.unwrap(angle)) > 0))
def test_volume_area(self):
# Basic check that we get back the correct volume and area for a cube
points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
tri = qhull.ConvexHull(points)
assert_allclose(tri.volume, 1., rtol=1e-14)
assert_allclose(tri.area, 6., rtol=1e-14)
@pytest.mark.parametrize("incremental", [False, True])
def test_good2d(self, incremental):
# Make sure the QGn option gives the correct value of "good".
points = np.array([[0.2, 0.2],
[0.2, 0.4],
[0.4, 0.4],
[0.4, 0.2],
[0.3, 0.6]])
hull = qhull.ConvexHull(points=points,
incremental=incremental,
qhull_options='QG4')
expected = np.array([False, True, False, False], dtype=bool)
actual = hull.good
assert_equal(actual, expected)
@pytest.mark.parametrize("visibility", [
"QG4", # visible=True
"QG-4", # visible=False
])
@pytest.mark.parametrize("new_gen, expected", [
# add generator that places QG4 inside hull
# so all facets are invisible
(np.array([[0.3, 0.7]]),
np.array([False, False, False, False, False], dtype=bool)),
# adding a generator on the opposite side of the square
# should preserve the single visible facet & add one invisible
# facet
(np.array([[0.3, -0.7]]),
np.array([False, True, False, False, False], dtype=bool)),
# split the visible facet on top of the square into two
# visible facets, with visibility at the end of the array
# because add_points concatenates
(np.array([[0.3, 0.41]]),
np.array([False, False, False, True, True], dtype=bool)),
# with our current Qhull options, coplanarity will not count
# for visibility; this case shifts one visible & one invisible
# facet & adds a coplanar facet
# simplex at index position 2 is the shifted visible facet
# the final simplex is the coplanar facet
(np.array([[0.5, 0.6], [0.6, 0.6]]),
np.array([False, False, True, False, False], dtype=bool)),
# place the new generator such that it envelops the query
# point within the convex hull, but only just barely within
# the double precision limit
# NOTE: testing exact degeneracy is less predictable than this
# scenario, perhaps because of the default Qt option we have
# enabled for Qhull to handle precision matters
(np.array([[0.3, 0.6 + 1e-16]]),
np.array([False, False, False, False, False], dtype=bool)),
])
def test_good2d_incremental_changes(self, new_gen, expected,
visibility):
# use the usual square convex hull
# generators from test_good2d
points = np.array([[0.2, 0.2],
[0.2, 0.4],
[0.4, 0.4],
[0.4, 0.2],
[0.3, 0.6]])
hull = qhull.ConvexHull(points=points,
incremental=True,
qhull_options=visibility)
hull.add_points(new_gen)
actual = hull.good
if '-' in visibility:
expected = np.invert(expected)
assert_equal(actual, expected)
@pytest.mark.parametrize("incremental", [False, True])
def test_good2d_no_option(self, incremental):
        # handle case where the `good` attribute doesn't exist
        # because QGn or QG-n wasn't specified
points = np.array([[0.2, 0.2],
[0.2, 0.4],
[0.4, 0.4],
[0.4, 0.2],
[0.3, 0.6]])
hull = qhull.ConvexHull(points=points,
incremental=incremental)
actual = hull.good
assert actual is None
# preserve None after incremental addition
if incremental:
hull.add_points(np.zeros((1, 2)))
actual = hull.good
assert actual is None
@pytest.mark.parametrize("incremental", [False, True])
def test_good2d_inside(self, incremental):
# Make sure the QGn option gives the correct value of "good".
# When point n is inside the convex hull of the rest, good is
# all False.
points = np.array([[0.2, 0.2],
[0.2, 0.4],
[0.4, 0.4],
[0.4, 0.2],
[0.3, 0.3]])
hull = qhull.ConvexHull(points=points,
incremental=incremental,
qhull_options='QG4')
expected = np.array([False, False, False, False], dtype=bool)
actual = hull.good
assert_equal(actual, expected)
@pytest.mark.parametrize("incremental", [False, True])
def test_good3d(self, incremental):
# Make sure the QGn option gives the correct value of "good"
# for a 3d figure
points = np.array([[0.0, 0.0, 0.0],
[0.90029516, -0.39187448, 0.18948093],
[0.48676420, -0.72627633, 0.48536925],
[0.57651530, -0.81179274, -0.09285832],
[0.67846893, -0.71119562, 0.18406710]])
hull = qhull.ConvexHull(points=points,
incremental=incremental,
qhull_options='QG0')
expected = np.array([True, False, False, False], dtype=bool)
assert_equal(hull.good, expected)
class TestVoronoi:
@pytest.mark.parametrize("qhull_opts, extra_pts", [
# option Qz (default for SciPy) will add
# an extra point at infinity
("Qbb Qc Qz", 1),
("Qbb Qc", 0),
])
@pytest.mark.parametrize("n_pts", [50, 100])
@pytest.mark.parametrize("ndim", [2, 3])
def test_point_region_structure(self,
qhull_opts,
n_pts,
extra_pts,
ndim):
# see gh-16773
rng = np.random.default_rng(7790)
points = rng.random((n_pts, ndim))
vor = Voronoi(points, qhull_options=qhull_opts)
pt_region = vor.point_region
assert pt_region.max() == n_pts - 1 + extra_pts
assert pt_region.size == len(vor.regions) - extra_pts
assert len(vor.regions) == n_pts + extra_pts
assert vor.points.shape[0] == n_pts
# if there is an empty sublist in the Voronoi
# regions data structure, it should never be
# indexed because it corresponds to an internally
# added point at infinity and is not a member of the
# generators (input points)
if extra_pts:
sublens = [len(x) for x in vor.regions]
# only one point at infinity (empty region)
# is allowed
assert sublens.count(0) == 1
assert sublens.index(0) not in pt_region
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Voronoi, masked_array)
def test_simple(self):
# Simple case with known Voronoi diagram
points = [(0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2)]
# qhull v o Fv Qbb Qc Qz < dat
output = """
2
5 10 1
-10.101 -10.101
0.5 0.5
0.5 1.5
1.5 0.5
1.5 1.5
2 0 1
3 2 0 1
2 0 2
3 3 0 1
4 1 2 4 3
3 4 0 2
2 0 3
3 4 0 3
2 0 4
0
12
4 0 3 0 1
4 0 1 0 1
4 1 4 1 2
4 1 2 0 2
4 2 5 0 2
4 3 4 1 3
4 3 6 0 3
4 4 5 2 4
4 4 7 3 4
4 5 8 0 4
4 6 7 0 3
4 7 8 0 4
"""
self._compare_qvoronoi(points, output)
def _compare_qvoronoi(self, points, output, **kw):
"""Compare to output from 'qvoronoi o Fv < data' to Voronoi()"""
# Parse output
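        # Layout of the 'o' + 'Fv' text (see test_simple above): line 0 is the
        # dimension, line 1 holds "<nvertex> <nregion> 1", line 2 is the added
        # vertex at infinity (excluded below), followed by the finite vertices
        # and the Voronoi regions, then the ridge count and one
        # "n p1 p2 v1 v2 ..." line per ridge.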
output = [list(map(float, x.split())) for x in output.strip().splitlines()]
nvertex = int(output[1][0])
vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf
nregion = int(output[1][1])
regions = [[int(y)-1 for y in x[1:]]
for x in output[2+nvertex:2+nvertex+nregion]]
ridge_points = [[int(y) for y in x[1:3]]
for x in output[3+nvertex+nregion:]]
ridge_vertices = [[int(y)-1 for y in x[3:]]
for x in output[3+nvertex+nregion:]]
# Compare results
vor = qhull.Voronoi(points, **kw)
def sorttuple(x):
return tuple(sorted(x))
assert_allclose(vor.vertices, vertices)
assert_equal(set(map(tuple, vor.regions)),
set(map(tuple, regions)))
p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices))))
p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),
list(map(sorttuple, vor.ridge_vertices))))
p1.sort()
p2.sort()
assert_equal(p1, p2)
@pytest.mark.parametrize("name", sorted(DATASETS))
def test_ridges(self, name):
# Check that the ridges computed by Voronoi indeed separate
# the regions of nearest neighborhood, by comparing the result
# to KDTree.
points = DATASETS[name]
tree = KDTree(points)
vor = qhull.Voronoi(points)
for p, v in vor.ridge_dict.items():
# consider only finite ridges
if not np.all(np.asarray(v) >= 0):
continue
ridge_midpoint = vor.vertices[v].mean(axis=0)
d = 1e-6 * (points[p[0]] - ridge_midpoint)
dist, k = tree.query(ridge_midpoint + d, k=1)
assert_equal(k, p[0])
dist, k = tree.query(ridge_midpoint - d, k=1)
assert_equal(k, p[1])
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
# qhull v o Fv Qbb Qc Qu < dat
output = """
2
3 5 1
-10.101 -10.101
0.6000000000000001 0.5
0.5 0.6000000000000001
3 0 2 1
2 0 1
2 0 2
0
3 0 2 1
5
4 0 2 0 2
4 0 4 1 2
4 0 1 0 1
4 1 4 0 1
4 2 4 0 2
"""
self._compare_qvoronoi(points, output, furthest_site=True)
def test_furthest_site_flag(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
vor = Voronoi(points)
assert_equal(vor.furthest_site,False)
vor = Voronoi(points,furthest_site=True)
assert_equal(vor.furthest_site,True)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the triangulation
if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:
# too slow (testing of the result --- qhull is still fast)
return
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Voronoi(points)
obj3 = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# -- Check that the incremental mode agrees with upfront mode
assert_equal(len(obj.point_region), len(obj2.point_region))
assert_equal(len(obj.point_region), len(obj3.point_region))
# The vertices may be in different order or duplicated in
# the incremental map
for objx in obj, obj3:
vertex_map = {-1: -1}
for i, v in enumerate(objx.vertices):
for j, v2 in enumerate(obj2.vertices):
if np.allclose(v, v2):
vertex_map[i] = j
def remap(x):
if hasattr(x, '__len__'):
return tuple(set([remap(y) for y in x]))
try:
return vertex_map[x]
except KeyError as e:
raise AssertionError("incremental result has spurious vertex at %r"
% (objx.vertices[x],)) from e
def simplified(x):
items = set(map(sorted_tuple, x))
if () in items:
items.remove(())
items = [x for x in items if len(x) > 1]
items.sort()
return items
assert_equal(
simplified(remap(objx.regions)),
simplified(obj2.regions)
)
assert_equal(
simplified(remap(objx.ridge_vertices)),
simplified(obj2.ridge_vertices)
)
# XXX: compare ridge_points --- not clear exactly how to do this
class Test_HalfspaceIntersection:
def assert_unordered_allclose(self, arr1, arr2, rtol=1e-7):
"""Check that every line in arr1 is only once in arr2"""
assert_equal(arr1.shape, arr2.shape)
truths = np.zeros((arr1.shape[0],), dtype=bool)
for l1 in arr1:
indexes = np.nonzero((abs(arr2 - l1) < rtol).all(axis=1))[0]
assert_equal(indexes.shape, (1,))
truths[indexes[0]] = True
assert_(truths.all())
@pytest.mark.parametrize("dt", [np.float64, int])
def test_cube_halfspace_intersection(self, dt):
halfspaces = np.array([[-1, 0, 0],
[0, -1, 0],
[1, 0, -2],
[0, 1, -2]], dtype=dt)
feasible_point = np.array([1, 1], dtype=dt)
points = np.array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [2.0, 2.0]])
hull = qhull.HalfspaceIntersection(halfspaces, feasible_point)
assert_allclose(hull.intersections, points)
def test_self_dual_polytope_intersection(self):
fname = os.path.join(os.path.dirname(__file__), 'data',
'selfdual-4d-polytope.txt')
ineqs = np.genfromtxt(fname)
halfspaces = -np.hstack((ineqs[:, 1:], ineqs[:, :1]))
feas_point = np.array([0., 0., 0., 0.])
hs = qhull.HalfspaceIntersection(halfspaces, feas_point)
assert_equal(hs.intersections.shape, (24, 4))
assert_almost_equal(hs.dual_volume, 32.0)
assert_equal(len(hs.dual_facets), 24)
for facet in hs.dual_facets:
assert_equal(len(facet), 6)
dists = halfspaces[:, -1] + halfspaces[:, :-1].dot(feas_point)
self.assert_unordered_allclose((halfspaces[:, :-1].T/dists).T, hs.dual_points)
points = itertools.permutations([0., 0., 0.5, -0.5])
for point in points:
assert_equal(np.sum((hs.intersections == point).all(axis=1)), 1)
def test_wrong_feasible_point(self):
halfspaces = np.array([[-1.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, -1.0],
[0.0, 1.0, -1.0]])
feasible_point = np.array([0.5, 0.5, 0.5])
        # Feasible point is (ndim,) instead of (ndim-1,)
assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([[0.5], [0.5]])
        # Feasible point is (ndim-1, 1) instead of (ndim-1,)
assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([[0.5, 0.5]])
        # Feasible point is (1, ndim-1) instead of (ndim-1,)
assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([-0.5, -0.5])
        # Feasible point is outside the feasible region
assert_raises(qhull.QhullError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
def test_incremental(self):
        # Cube
halfspaces = np.array([[0., 0., -1., -0.5],
[0., -1., 0., -0.5],
[-1., 0., 0., -0.5],
[1., 0., 0., -0.5],
[0., 1., 0., -0.5],
[0., 0., 1., -0.5]])
        # Cut off each corner of the cube
extra_normals = np.array([[1., 1., 1.],
[1., 1., -1.],
[1., -1., 1.],
[1, -1., -1.]])
offsets = np.array([[-1.]]*8)
extra_halfspaces = np.hstack((np.vstack((extra_normals, -extra_normals)),
offsets))
feas_point = np.array([0., 0., 0.])
inc_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
inc_res_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
for i, ehs in enumerate(extra_halfspaces):
inc_hs.add_halfspaces(ehs[np.newaxis, :])
inc_res_hs.add_halfspaces(ehs[np.newaxis, :], restart=True)
total = np.vstack((halfspaces, extra_halfspaces[:i+1, :]))
hs = qhull.HalfspaceIntersection(total, feas_point)
assert_allclose(inc_hs.halfspaces, inc_res_hs.halfspaces)
assert_allclose(inc_hs.halfspaces, hs.halfspaces)
            # Direct computation and restart should have points in the same order
assert_allclose(hs.intersections, inc_res_hs.intersections)
            # Incremental will have points in a different order than direct computation
self.assert_unordered_allclose(inc_hs.intersections, hs.intersections)
inc_hs.close()
def test_cube(self):
# Halfspaces of the cube:
halfspaces = np.array([[-1., 0., 0., 0.], # x >= 0
[1., 0., 0., -1.], # x <= 1
[0., -1., 0., 0.], # y >= 0
[0., 1., 0., -1.], # y <= 1
[0., 0., -1., 0.], # z >= 0
[0., 0., 1., -1.]]) # z <= 1
point = np.array([0.5, 0.5, 0.5])
hs = qhull.HalfspaceIntersection(halfspaces, point)
# qhalf H0.5,0.5,0.5 o < input.txt
qhalf_points = np.array([
[-2, 0, 0],
[2, 0, 0],
[0, -2, 0],
[0, 2, 0],
[0, 0, -2],
[0, 0, 2]])
qhalf_facets = [
[2, 4, 0],
[4, 2, 1],
[5, 2, 0],
[2, 5, 1],
[3, 4, 1],
[4, 3, 0],
[5, 3, 1],
[3, 5, 0]]
assert len(qhalf_facets) == len(hs.dual_facets)
for a, b in zip(qhalf_facets, hs.dual_facets):
assert set(a) == set(b) # facet orientation can differ
assert_allclose(hs.dual_points, qhalf_points)
| bsd-3-clause | 3274653af028bb8e125d064008a45f0d | 36.476231 | 98 | 0.522255 | 3.269906 | false | true | false | false |
scipy/scipy | scipy/sparse/_csc.py | 13 | 7925 | """Compressed Sparse Column matrix format"""
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from ._base import spmatrix
from ._sparsetools import csc_tocsr, expandptr
from ._sputils import upcast, get_index_dtype
from ._compressed import _cs_matrix
class csc_matrix(_cs_matrix):
"""
Compressed Sparse Column matrix
This can be instantiated in several ways:
csc_matrix(D)
with a dense matrix or rank-2 ndarray D
csc_matrix(S)
with another sparse matrix S (equivalent to S.tocsc())
csc_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csc_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSC representation where the row indices for
column i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding values are stored in
``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
not supplied, the matrix dimensions are inferred from
the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of stored values, including explicit zeros
data
Data array of the matrix
indices
CSC format index array
indptr
CSC format index pointer array
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSC format
- efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
- efficient column slicing
- fast matrix vector products (CSR, BSR may be faster)
Disadvantages of the CSC format
- slow row slicing operations (consider CSR)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> csc_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 2, 2, 0, 1, 2])
>>> col = np.array([0, 0, 1, 2, 2, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 4],
[0, 0, 5],
[2, 3, 6]])
"""
format = 'csc'
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
M, N = self.shape
return self._csr_container((self.data, self.indices,
self.indptr), (N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def __iter__(self):
yield from self.tocsr()
def tocsc(self, copy=False):
if copy:
return self.copy()
else:
return self
tocsc.__doc__ = spmatrix.tocsc.__doc__
def tocsr(self, copy=False):
M,N = self.shape
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csc_tocsr(M, N,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr,
indices,
data)
A = self._csr_container(
(data, indices, indptr),
shape=self.shape, copy=False
)
A.has_sorted_indices = True
return A
tocsr.__doc__ = spmatrix.tocsr.__doc__
def nonzero(self):
# CSC can't use _cs_matrix's .nonzero method because it
# returns the indices sorted for self transposed.
# Get row and col indices, from _cs_matrix.tocoo
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
# Remove explicit zeros
nz_mask = self.data != 0
row = row[nz_mask]
col = col[nz_mask]
# Sort them to be in C-style order
ind = np.argsort(row, kind='mergesort')
row = row[ind]
col = col[ind]
return row, col
nonzero.__doc__ = _cs_matrix.nonzero.__doc__
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
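        A small illustrative example (dtype fixed so that the printed output
        does not depend on the platform default integer):
        >>> from scipy.sparse import csc_matrix
        >>> A = csc_matrix([[1, 0, 4], [0, 0, 5], [2, 3, 6]], dtype='int8')
        >>> A.getrow(1).toarray()
        array([[0, 0, 5]], dtype=int8)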
"""
M, N = self.shape
i = int(i)
if i < 0:
i += M
if i < 0 or i >= M:
raise IndexError('index (%d) out of range' % i)
return self._get_submatrix(minor=i).tocsr()
def getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSC matrix (column vector).
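        A small illustrative example (dtype fixed so that the printed output
        does not depend on the platform default integer):
        >>> from scipy.sparse import csc_matrix
        >>> A = csc_matrix([[1, 0, 4], [0, 0, 5], [2, 3, 6]], dtype='int8')
        >>> A.getcol(2).toarray()
        array([[4],
               [5],
               [6]], dtype=int8)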
"""
M, N = self.shape
i = int(i)
if i < 0:
i += N
if i < 0 or i >= N:
raise IndexError('index (%d) out of range' % i)
return self._get_submatrix(major=i, copy=True)
def _get_intXarray(self, row, col):
return self._major_index_fancy(col)._get_submatrix(minor=row)
def _get_intXslice(self, row, col):
if col.step in (1, None):
return self._get_submatrix(major=col, minor=row, copy=True)
return self._major_slice(col)._get_submatrix(minor=row)
def _get_sliceXint(self, row, col):
if row.step in (1, None):
return self._get_submatrix(major=col, minor=row, copy=True)
return self._get_submatrix(major=col)._minor_slice(row)
def _get_sliceXarray(self, row, col):
return self._major_index_fancy(col)._minor_slice(row)
def _get_arrayXint(self, row, col):
return self._get_submatrix(major=col)._minor_index_fancy(row)
def _get_arrayXslice(self, row, col):
return self._major_slice(col)._minor_index_fancy(row)
# these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
def _swap(self, x):
"""swap the members of x if this is a column-oriented matrix
"""
return x[1], x[0]
def isspmatrix_csc(x):
"""Is x of csc_matrix type?
Parameters
----------
x
object to check for being a csc matrix
Returns
-------
bool
True if x is a csc matrix, False otherwise
Examples
--------
>>> from scipy.sparse import csc_matrix, isspmatrix_csc
>>> isspmatrix_csc(csc_matrix([[5]]))
True
>>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
>>> isspmatrix_csc(csr_matrix([[5]]))
False
"""
from ._arrays import csc_array
return isinstance(x, csc_matrix) or isinstance(x, csc_array)
| bsd-3-clause | b23579f9db1f5724e13e5b0433b7e2ce | 29.480769 | 78 | 0.55836 | 3.713683 | false | false | false | false |
scipy/scipy | scipy/signal/_arraytools.py | 27 | 7489 | """
Functions for acting on a axis of an array.
"""
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'.
Parameters
----------
a : numpy.ndarray
The array to be sliced.
start, stop, step : int or None
The slice parameters.
axis : int, optional
The axis of `a` to be sliced.
Examples
--------
    >>> from scipy.signal._arraytools import axis_slice
    >>> import numpy as np
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> axis_slice(a, start=0, stop=1, axis=1)
array([[1],
[4],
[7]])
>>> axis_slice(a, start=1, axis=0)
array([[4, 5, 6],
[7, 8, 9]])
Notes
-----
The keyword arguments start, stop and step are used by calling
    slice(start, stop, step). This implies axis_slice() does not
    handle its arguments exactly the same as indexing. To select
a single index k, for example, use
axis_slice(a, start=k, stop=k+1)
In this case, the length of the axis 'axis' in the result will
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
to remove trivial axes.)
"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[tuple(a_slice)]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-D slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
"""
Odd extension at the boundaries of an array
Generate a new ndarray by making an odd extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
    >>> from scipy.signal._arraytools import odd_ext
    >>> import numpy as np
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> odd_ext(a, 2)
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
Odd extension is a "180 degree rotation" at the endpoints of the original
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = odd_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""
Even extension at the boundaries of an array
Generate a new ndarray by making an even extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
    >>> from scipy.signal._arraytools import even_ext
    >>> import numpy as np
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> even_ext(a, 2)
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
Even extension is a "mirror image" at the boundaries of the original array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = even_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""
Constant extension at the boundaries of an array
Generate a new ndarray that is a constant extension of `x` along an axis.
The extension repeats the values at the first and last element of
the axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
    >>> from scipy.signal._arraytools import const_ext
    >>> import numpy as np
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> const_ext(a, 2)
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
Constant extension continues with the same values as the endpoints of the
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = const_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
"""
Zero padding at the boundaries of an array
Generate a new ndarray that is a zero-padded extension of `x` along
an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the
axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
    >>> from scipy.signal._arraytools import zero_ext
    >>> import numpy as np
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> zero_ext(a, 2)
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
"""
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
| bsd-3-clause | 83dca6943ef1511ae043dad01413a0a1 | 30.074689 | 79 | 0.52517 | 3.246207 | false | false | false | false |
scipy/scipy | scipy/sparse/_sputils.py | 8 | 13136 | """ Utility functions for sparse matrix module
"""
import sys
import operator
import numpy as np
from scipy._lib._util import prod
import scipy.sparse as sp
__all__ = ['upcast', 'getdtype', 'getdata', 'isscalarlike', 'isintlike',
'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc,
np.uintc, np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double,
np.longdouble, np.csingle, np.cdouble, np.clongdouble]
_upcast_memo = {}
def upcast(*args):
"""Returns the nearest supported sparse dtype for the
combination of one or more types.
upcast(t0, t1, ..., tn) -> T where T is a supported dtype
Examples
--------
    >>> upcast('int32')
    <class 'numpy.int32'>
    >>> upcast('bool')
    <class 'numpy.bool_'>
    >>> upcast('int32', 'float32')
    <class 'numpy.float64'>
    >>> upcast('bool', complex, float)
    <class 'numpy.complex128'>
"""
t = _upcast_memo.get(hash(args))
if t is not None:
return t
upcast = np.result_type(*args)
for t in supported_dtypes:
if np.can_cast(upcast, t):
_upcast_memo[hash(args)] = t
return t
raise TypeError('no supported conversion for types: %r' % (args,))
def upcast_char(*args):
"""Same as `upcast` but taking dtype.char as input (faster)."""
t = _upcast_memo.get(args)
if t is not None:
return t
t = upcast(*map(np.dtype, args))
_upcast_memo[args] = t
return t
def upcast_scalar(dtype, scalar):
"""Determine data type for binary operation between an array of
type `dtype` and a scalar.
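    For example (illustrative; both results follow NumPy's casting rules):
    >>> upcast_scalar(np.int8, 2)
    dtype('int8')
    >>> upcast_scalar(np.int8, 2.0)
    dtype('float64')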
"""
return (np.array([0], dtype=dtype) * scalar).dtype
def downcast_intp_index(arr):
"""
Down-cast index array to np.intp dtype if it is of a larger dtype.
Raise an error if the array contains a value that is too large for
intp.
"""
if arr.dtype.itemsize > np.dtype(np.intp).itemsize:
if arr.size == 0:
return arr.astype(np.intp)
maxval = arr.max()
minval = arr.min()
if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:
raise ValueError("Cannot deal with arrays with indices larger "
"than the machine maximum address size "
"(e.g. 64-bit indices on 32-bit machine).")
return arr.astype(np.intp)
return arr
def to_native(A):
"""
Ensure that the data type of the NumPy array `A` has native byte order.
`A` must be a NumPy array. If the data type of `A` does not have native
byte order, a copy of `A` with a native byte order is returned. Otherwise
`A` is returned.
"""
dt = A.dtype
if dt.isnative:
# Don't call `asarray()` if A is already native, to avoid unnecessarily
# creating a view of the input array.
return A
return np.asarray(A, dtype=dt.newbyteorder('native'))
def getdtype(dtype, a=None, default=None):
"""Function used to simplify argument processing. If 'dtype' is not
specified (is None), returns a.dtype; otherwise returns a np.dtype
object created from the specified dtype argument. If 'dtype' and 'a'
are both None, construct a data type out of the 'default' parameter.
Furthermore, 'dtype' must be in 'allowed' set.
"""
# TODO is this really what we want?
if dtype is None:
try:
newdtype = a.dtype
except AttributeError as e:
if default is not None:
newdtype = np.dtype(default)
else:
raise TypeError("could not interpret data type") from e
else:
newdtype = np.dtype(dtype)
if newdtype == np.object_:
raise ValueError(
"object dtype is not supported by sparse matrices"
)
return newdtype
def getdata(obj, dtype=None, copy=False):
"""
This is a wrapper of `np.array(obj, dtype=dtype, copy=copy)`
that will generate a warning if the result is an object array.
"""
data = np.array(obj, dtype=dtype, copy=copy)
# Defer to getdtype for checking that the dtype is OK.
# This is called for the validation only; we don't need the return value.
getdtype(data.dtype)
return data
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
"""
Based on input (integer) arrays `a`, determine a suitable index data
type that can hold the data in the arrays.
Parameters
----------
arrays : tuple of array_like
Input arrays whose types/contents to check
maxval : float, optional
Maximum value needed
check_contents : bool, optional
Whether to check the values in the arrays and not just their types.
Default: False (check only the types)
Returns
-------
dtype : dtype
Suitable index data type (int32 or int64)
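    Examples
    --------
    A rough illustration; the exact return value assumes a platform where
    ``np.intc`` is 32-bit (the common case):
    >>> get_index_dtype(maxval=10) is np.int32
    True
    >>> get_index_dtype(maxval=np.iinfo(np.int32).max + 1) is np.int64
    True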
"""
int32min = np.int32(np.iinfo(np.int32).min)
int32max = np.int32(np.iinfo(np.int32).max)
# not using intc directly due to misinteractions with pythran
dtype = np.int32 if np.intc().itemsize == 4 else np.int64
if maxval is not None:
maxval = np.int64(maxval)
if maxval > int32max:
dtype = np.int64
if isinstance(arrays, np.ndarray):
arrays = (arrays,)
for arr in arrays:
arr = np.asarray(arr)
if not np.can_cast(arr.dtype, np.int32):
if check_contents:
if arr.size == 0:
# a bigger type not needed
continue
elif np.issubdtype(arr.dtype, np.integer):
maxval = arr.max()
minval = arr.min()
if minval >= int32min and maxval <= int32max:
# a bigger type not needed
continue
dtype = np.int64
break
return dtype
def get_sum_dtype(dtype):
"""Mimic numpy's casting for np.sum"""
if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
return np.uint
if np.can_cast(dtype, np.int_):
return np.int_
return dtype
def isscalarlike(x):
"""Is x either a scalar, an array scalar, or a 0-dim array?"""
return np.isscalar(x) or (isdense(x) and x.ndim == 0)
def isintlike(x):
"""Is x appropriate as an index into a sparse matrix? Returns True
if it can be cast safely to a machine int.
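    For example (illustrative):
    >>> isintlike(3)
    True
    >>> isintlike(np.int64(3))
    True
    >>> isintlike(2.5)
    False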
"""
# Fast-path check to eliminate non-scalar values. operator.index would
# catch this case too, but the exception catching is slow.
if np.ndim(x) != 0:
return False
try:
operator.index(x)
except (TypeError, ValueError):
try:
loose_int = bool(int(x) == x)
except (TypeError, ValueError):
return False
if loose_int:
msg = "Inexact indices into sparse matrices are not allowed"
raise ValueError(msg)
return loose_int
return True
def isshape(x, nonneg=False):
"""Is x a valid 2-tuple of dimensions?
If nonneg, also checks that the dimensions are non-negative.
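    For example (illustrative):
    >>> isshape((5, 3))
    True
    >>> isshape((5, 3, 2))
    False
    >>> isshape((5, -1), nonneg=True)
    False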
"""
try:
# Assume it's a tuple of matrix dimensions (M, N)
(M, N) = x
except Exception:
return False
else:
if isintlike(M) and isintlike(N):
if np.ndim(M) == 0 and np.ndim(N) == 0:
if not nonneg or (M >= 0 and N >= 0):
return True
return False
def issequence(t):
return ((isinstance(t, (list, tuple)) and
(len(t) == 0 or np.isscalar(t[0]))) or
(isinstance(t, np.ndarray) and (t.ndim == 1)))
def ismatrix(t):
return ((isinstance(t, (list, tuple)) and
len(t) > 0 and issequence(t[0])) or
(isinstance(t, np.ndarray) and t.ndim == 2))
def isdense(x):
return isinstance(x, np.ndarray)
def validateaxis(axis):
if axis is not None:
axis_type = type(axis)
# In NumPy, you can pass in tuples for 'axis', but they are
# not very useful for sparse matrices given their limited
# dimensions, so let's make it explicit that they are not
# allowed to be passed in
if axis_type == tuple:
raise TypeError(("Tuples are not accepted for the 'axis' "
"parameter. Please pass in one of the "
"following: {-2, -1, 0, 1, None}."))
# If not a tuple, check that the provided axis is actually
# an integer and raise a TypeError similar to NumPy's
if not np.issubdtype(np.dtype(axis_type), np.integer):
raise TypeError("axis must be an integer, not {name}"
.format(name=axis_type.__name__))
if not (-2 <= axis <= 1):
raise ValueError("axis out of range")
def check_shape(args, current_shape=None):
"""Imitate numpy.matrix handling of shape arguments"""
if len(args) == 0:
raise TypeError("function missing 1 required positional argument: "
"'shape'")
elif len(args) == 1:
try:
shape_iter = iter(args[0])
except TypeError:
new_shape = (operator.index(args[0]), )
else:
new_shape = tuple(operator.index(arg) for arg in shape_iter)
else:
new_shape = tuple(operator.index(arg) for arg in args)
if current_shape is None:
if len(new_shape) != 2:
raise ValueError('shape must be a 2-tuple of positive integers')
elif any(d < 0 for d in new_shape):
raise ValueError("'shape' elements cannot be negative")
else:
# Check the current size only if needed
current_size = prod(current_shape)
# Check for negatives
negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
if len(negative_indexes) == 0:
new_size = prod(new_shape)
if new_size != current_size:
raise ValueError('cannot reshape array of size {} into shape {}'
.format(current_size, new_shape))
elif len(negative_indexes) == 1:
skip = negative_indexes[0]
specified = prod(new_shape[0:skip] + new_shape[skip+1:])
unspecified, remainder = divmod(current_size, specified)
if remainder != 0:
err_shape = tuple('newshape' if x < 0 else x for x in new_shape)
raise ValueError('cannot reshape array of size {} into shape {}'
''.format(current_size, err_shape))
new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:]
else:
raise ValueError('can only specify one unknown dimension')
if len(new_shape) != 2:
raise ValueError('matrix shape must be two-dimensional')
return new_shape
def check_reshape_kwargs(kwargs):
"""Unpack keyword arguments for reshape function.
This is useful because keyword arguments after star arguments are not
allowed in Python 2, but star keyword arguments are. This function unpacks
'order' and 'copy' from the star keyword arguments (with defaults) and
throws an error for any remaining.
"""
order = kwargs.pop('order', 'C')
copy = kwargs.pop('copy', False)
if kwargs: # Some unused kwargs remain
        raise TypeError('reshape() got unexpected keyword arguments: {}'
.format(', '.join(kwargs.keys())))
return order, copy
def is_pydata_spmatrix(m):
"""
Check whether object is pydata/sparse matrix, avoiding importing the module.
"""
base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None)
return base_cls is not None and isinstance(m, base_cls)
###############################################################################
# Wrappers for NumPy types that are deprecated
# Numpy versions of these functions raise deprecation warnings, the
# ones below do not.
def matrix(*args, **kwargs):
return np.array(*args, **kwargs).view(np.matrix)
def asmatrix(data, dtype=None):
if isinstance(data, np.matrix) and (dtype is None or data.dtype == dtype):
return data
return np.asarray(data, dtype=dtype).view(np.matrix)
###############################################################################
def _todata(s: 'sp.spmatrix') -> np.ndarray:
"""Access nonzero values, possibly after summing duplicates.
Parameters
----------
s : sparse matrix
Input sparse matrix.
Returns
-------
data: ndarray
Nonzero values of the array, with shape (s.nnz,)
"""
if isinstance(s, sp._data._data_matrix):
return s._deduped_data()
if isinstance(s, sp.dok_matrix):
return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz)
if isinstance(s, sp.lil_matrix):
data = np.empty(s.nnz, dtype=s.dtype)
sp._csparsetools.lil_flatten_to_array(s.data, data)
return data
return s.tocoo()._deduped_data()
| bsd-3-clause | 7787577de369a012c390f005d07fc417 | 30.806295 | 80 | 0.584881 | 3.918854 | false | false | false | false |
scipy/scipy | tools/write_release_and_log.py | 11 | 4038 | """
Standalone script for writing release doc and logs::
python tools/write_release_and_log.py <LOG_START> <LOG_END>
Example::
python tools/write_release_and_log.py v1.7.0 v1.8.0
Needs to be run from the root of the repository.
"""
import os
import sys
import subprocess
from hashlib import md5
from hashlib import sha256
from pathlib import Path
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'tools'))
try:
version_utils = __import__("version_utils")
FULLVERSION = version_utils.VERSION
# This is duplicated from setup.py
if os.path.exists('.git'):
GIT_REVISION, _ = version_utils.git_version(
os.path.join(os.path.dirname(__file__), '..'))
else:
GIT_REVISION = "Unknown"
if not version_utils.ISRELEASED:
if GIT_REVISION == "Unknown":
FULLVERSION += '.dev0+Unknown'
else:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
sys.path.pop(1)
sys.path.pop(0)
try:
# Ensure sensible file permissions
os.umask(0o022)
except AttributeError:
# No umask on non-posix
pass
def get_latest_release_doc(path):
"""
Method to pick the file from 'doc/release' with the highest
release number (e.g., `1.9.0-notes.rst`).
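    Version strings are compared numerically rather than lexicographically,
    so, for example, ``1.10.0-notes.rst`` is picked over ``1.9.1-notes.rst``.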
"""
file_paths = os.listdir(path)
file_paths.sort(key=lambda x: list(map(int, (x.split('-')[0].split('.')))))
return os.path.join(path, file_paths[-1])
# ----------------------------
# Release notes and Changelog
# ----------------------------
def compute_md5(idirs):
released = os.listdir(idirs)
checksums = []
for fn in sorted(released):
fn_updated = os.path.join("release", fn)
with open(fn_updated, 'rb') as f:
m = md5(f.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
return checksums
def compute_sha256(idirs):
# better checksum so gpg signed README.txt containing the sums can be used
# to verify the binaries instead of signing all binaries
released = os.listdir(idirs)
checksums = []
for fn in sorted(released):
fn_updated = os.path.join("release", fn)
with open(fn_updated, 'rb') as f:
m = sha256(f.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
return checksums
def write_release_task(filename='NOTES.txt'):
idirs = Path('release')
source = Path(get_latest_release_doc('doc/release'))
target = Path(filename)
if target.exists():
        target.unlink()
tmp_target = Path(filename + '.txt')
os.system(f'cp {source} {tmp_target}')
with open(str(tmp_target), 'a') as ftarget:
ftarget.writelines("""
Checksums
=========
MD5
~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
ftarget.writelines("""
SHA256
~~~~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_sha256(idirs)])
print("Release README generated successfully")
def write_log_task(filename='Changelog'):
st = subprocess.Popen(
['git', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = st.communicate()
    if st.returncode != 0:
        raise RuntimeError("%s failed" % str(error))
    out = output.decode()
with open(filename, 'w') as a:
a.writelines(out)
print("Release logs generated successfully")
def main():
"""
    Checks whether the release directory is present
    and calls the methods to generate the release notes and changelog
"""
if not os.path.exists("release"):
os.makedirs("release")
write_release_task(os.path.join("release", 'README'))
write_log_task(os.path.join("release", 'Changelog'))
if __name__ == '__main__':
if len(sys.argv) == 3:
LOG_START = str(sys.argv[1])
LOG_END = str(sys.argv[2])
else:
print("invalid number of arguments, please add LOG_START and LOG_END")
main()
| bsd-3-clause | 5914e5b3f095711eaa103f437fbb0727 | 25.220779 | 79 | 0.607479 | 3.436596 | false | false | false | false |
scipy/scipy | scipy/sparse/__init__.py | 13 | 8636 | """
=====================================
Sparse matrices (:mod:`scipy.sparse`)
=====================================
.. currentmodule:: scipy.sparse
SciPy 2-D sparse array package for numeric data.
.. note::
This package is switching to an array interface, compatible with
NumPy arrays, from the older matrix interface. We recommend that
you use the array objects (`bsr_array`, `coo_array`, etc.) for
all new work.
When using the array interface, please note that:
- ``x * y`` no longer performs matrix multiplication, but
element-wise multiplication (just like with NumPy arrays). To
make code work with both arrays and matrices, use ``x @ y`` for
matrix multiplication.
- Operations such as `sum`, that used to produce dense matrices, now
produce arrays, whose multiplication behavior differs similarly.
- Sparse arrays currently must be two-dimensional. This also means
that all *slicing* operations on these objects must produce
two-dimensional results, or they will result in an error. This
will be addressed in a future version.
The construction utilities (`eye`, `kron`, `random`, `diags`, etc.)
have not yet been ported, but their results can be wrapped into arrays::
A = csr_array(eye(3))
Contents
========
Sparse array classes
--------------------
.. autosummary::
:toctree: generated/
bsr_array - Block Sparse Row array
coo_array - A sparse array in COOrdinate format
csc_array - Compressed Sparse Column array
csr_array - Compressed Sparse Row array
dia_array - Sparse array with DIAgonal storage
dok_array - Dictionary Of Keys based sparse array
lil_array - Row-based list of lists sparse array
Sparse matrix classes
---------------------
.. autosummary::
:toctree: generated/
bsr_matrix - Block Sparse Row matrix
coo_matrix - A sparse matrix in COOrdinate format
csc_matrix - Compressed Sparse Column matrix
csr_matrix - Compressed Sparse Row matrix
dia_matrix - Sparse matrix with DIAgonal storage
dok_matrix - Dictionary Of Keys based sparse matrix
lil_matrix - Row-based list of lists sparse matrix
spmatrix - Sparse matrix base class
Functions
---------
Building sparse matrices:
.. autosummary::
:toctree: generated/
eye - Sparse MxN matrix whose k-th diagonal is all ones
identity - Identity matrix in sparse format
kron - kronecker product of two sparse matrices
kronsum - kronecker sum of sparse matrices
diags - Return a sparse matrix from diagonals
spdiags - Return a sparse matrix from diagonals
block_diag - Build a block diagonal sparse matrix
tril - Lower triangular portion of a matrix in sparse format
triu - Upper triangular portion of a matrix in sparse format
bmat - Build a sparse matrix from sparse sub-blocks
hstack - Stack sparse matrices horizontally (column wise)
vstack - Stack sparse matrices vertically (row wise)
rand - Random values in a given shape
random - Random values in a given shape
Save and load sparse matrices:
.. autosummary::
:toctree: generated/
save_npz - Save a sparse matrix to a file using ``.npz`` format.
load_npz - Load a sparse matrix from a file using ``.npz`` format.
Sparse matrix tools:
.. autosummary::
:toctree: generated/
find
Identifying sparse matrices:
.. autosummary::
:toctree: generated/
issparse
isspmatrix
isspmatrix_csc
isspmatrix_csr
isspmatrix_bsr
isspmatrix_lil
isspmatrix_dok
isspmatrix_coo
isspmatrix_dia
Submodules
----------
.. autosummary::
csgraph - Compressed sparse graph routines
linalg - sparse linear algebra routines
Exceptions
----------
.. autosummary::
:toctree: generated/
SparseEfficiencyWarning
SparseWarning
Usage information
=================
There are seven available sparse matrix types:
1. csc_matrix: Compressed Sparse Column format
2. csr_matrix: Compressed Sparse Row format
3. bsr_matrix: Block Sparse Row format
4. lil_matrix: List of Lists format
5. dok_matrix: Dictionary of Keys format
6. coo_matrix: COOrdinate format (aka IJV, triplet format)
7. dia_matrix: DIAgonal format
To construct a matrix efficiently, use either dok_matrix or lil_matrix.
The lil_matrix class supports basic slicing and fancy indexing with a
similar syntax to NumPy arrays. As illustrated below, the COO format
may also be used to efficiently construct matrices. Despite their
similarity to NumPy arrays, it is **strongly discouraged** to use NumPy
functions directly on these matrices because NumPy may not properly convert
them for computations, leading to unexpected (and incorrect) results. If you
do want to apply a NumPy function to these matrices, first check if SciPy has
its own implementation for the given sparse matrix class, or **convert the
sparse matrix to a NumPy array** (e.g., using the `toarray()` method of the
class) first before applying the method.
To perform manipulations such as multiplication or inversion, first
convert the matrix to either CSC or CSR format. The lil_matrix format is
row-based, so conversion to CSR is efficient, whereas conversion to CSC
is less so.
All conversions among the CSR, CSC, and COO formats are efficient,
linear-time operations.
Matrix vector product
---------------------
To do a vector product between a sparse matrix and a vector simply use
the matrix `dot` method, as described in its docstring:
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
>>> v = np.array([1, 0, -1])
>>> A.dot(v)
array([ 1, -3, -1], dtype=int64)
.. warning:: As of NumPy 1.7, `np.dot` is not aware of sparse matrices,
   therefore using it will result in unexpected results or errors.
The corresponding dense array should be obtained first instead:
>>> np.dot(A.toarray(), v)
array([ 1, -3, -1], dtype=int64)
but then all the performance advantages would be lost.
The CSR format is specially suitable for fast matrix vector products.
Example 1
---------
Construct a 1000x1000 lil_matrix and add some values to it:
>>> from scipy.sparse import lil_matrix
>>> from scipy.sparse.linalg import spsolve
>>> from numpy.linalg import solve, norm
>>> from numpy.random import rand
>>> A = lil_matrix((1000, 1000))
>>> A[0, :100] = rand(100)
>>> A[1, 100:200] = A[0, :100]
>>> A.setdiag(rand(1000))
Now convert it to CSR format and solve A x = b for x:
>>> A = A.tocsr()
>>> b = rand(1000)
>>> x = spsolve(A, b)
Convert it to a dense matrix and solve, and check that the result
is the same:
>>> x_ = solve(A.toarray(), b)
Now we can compute the norm of the error with:
>>> err = norm(x-x_)
>>> err < 1e-10
True
It should be small :)
Example 2
---------
Construct a matrix in COO format:
>>> from scipy import sparse
>>> from numpy import array
>>> I = array([0,3,1,0])
>>> J = array([0,3,1,2])
>>> V = array([4,5,7,9])
>>> A = sparse.coo_matrix((V,(I,J)),shape=(4,4))
Notice that the indices do not need to be sorted.
Duplicate (i,j) entries are summed when converting to CSR or CSC.
>>> I = array([0,0,1,3,1,0,0])
>>> J = array([0,2,1,3,1,0,0])
>>> V = array([1,1,1,1,1,1,1])
>>> B = sparse.coo_matrix((V,(I,J)),shape=(4,4)).tocsr()
This is useful for constructing finite-element stiffness and mass matrices.
Further details
---------------
CSR column indices are not necessarily sorted. Likewise for CSC row
indices. Use the .sorted_indices() and .sort_indices() methods when
sorted indices are required (e.g., when passing data to other libraries).
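For example (illustrative):
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix([[0, 2, 1]])
>>> A.sort_indices()        # sort the index arrays in place
>>> B = A.sorted_indices()  # or obtain a sorted copy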
"""
# Original code by Travis Oliphant.
# Modified and extended by Ed Schofield, Robert Cimrman,
# Nathan Bell, and Jake Vanderplas.
import warnings as _warnings
from ._base import *
from ._csr import *
from ._csc import *
from ._lil import *
from ._dok import *
from ._coo import *
from ._dia import *
from ._bsr import *
from ._construct import *
from ._extract import *
from ._matrix_io import *
from ._arrays import (
csr_array, csc_array, lil_array, dok_array, coo_array, dia_array, bsr_array
)
# For backward compatibility with v0.19.
from . import csgraph
# Deprecated namespaces, to be removed in v2.0.0
from . import (
base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract,
lil, sparsetools, sputils
)
__all__ = [s for s in dir() if not s.startswith('_')]
# Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15
_warnings.filterwarnings('ignore', message='the matrix subclass is not the recommended way')
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| bsd-3-clause | f339be2609078986d77c4744c8470708 | 27.979866 | 92 | 0.698819 | 3.65313 | false | false | false | false |
scipy/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py | 25 | 17840 | # -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, cos, exp, arange, pi, sin, sqrt, sum, zeros, tanh
from numpy.testing import assert_almost_equal
from .go_benchmark import Benchmark
class Damavandi(Benchmark):
r"""
Damavandi objective function.
This class defines the Damavandi [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Damavandi}}(x) = \left[ 1 - \lvert{\frac{
        \sin[\pi (x_1 - 2)]\sin[\pi (x_2 - 2)]}{\pi^2 (x_1 - 2)(x_2 - 2)}}
\rvert^5 \right] \left[2 + (x_1 - 7)^2 + 2(x_2 - 7)^2 \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 14]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0.0` for :math:`x_i = 2` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self._bounds = list(zip([0.0] * self.N, [14.0] * self.N))
self.global_optimum = [[2 for _ in range(self.N)]]
self.fglob = np.nan
def fun(self, x, *args):
self.nfev += 1
try:
num = sin(pi * (x[0] - 2.0)) * sin(pi * (x[1] - 2.0))
den = (pi ** 2) * (x[0] - 2.0) * (x[1] - 2.0)
factor1 = 1.0 - (abs(num / den)) ** 5.0
factor2 = 2 + (x[0] - 7.0) ** 2.0 + 2 * (x[1] - 7.0) ** 2.0
return factor1 * factor2
except ZeroDivisionError:
return np.nan
def success(self, x):
"""Is a candidate solution at the global minimum"""
val = self.fun(x)
if np.isnan(val):
return True
try:
assert_almost_equal(val, 0., 4)
return True
except AssertionError:
return False
return False
class Deb01(Benchmark):
r"""
Deb 1 objective function.
This class defines the Deb 1 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Deb01}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6(5 \pi x_i)
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x_i) = 0.0`. The number of global minima is
:math:`5^n` that are evenly spaced in the function landscape, where
:math:`n` represents the dimension of the problem.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.3, -0.3]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
return -(1.0 / self.N) * sum(sin(5 * pi * x) ** 6.0)
class Deb03(Benchmark):
r"""
Deb 3 objective function.
This class defines the Deb 3 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
        f_{\text{Deb03}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6 \left[ 5 \pi
\left ( x_i^{3/4} - 0.05 \right) \right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0.0`. The number of global minima is
:math:`5^n` that are evenly spaced in the function landscape, where
:math:`n` represents the dimension of the problem.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.93388314, 0.68141781]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
return -(1.0 / self.N) * sum(sin(5 * pi * (x ** 0.75 - 0.05)) ** 6.0)
class Decanomial(Benchmark):
r"""
Decanomial objective function.
This class defines the Decanomial function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Decanomial}}(x) = 0.001 \left(\lvert{x_{2}^{4} + 12 x_{2}^{3}
+ 54 x_{2}^{2} + 108 x_{2} + 81.0}\rvert + \lvert{x_{1}^{10}
- 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6}
- 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2}
- 5120 x_{1} + 2624.0}\rvert\right)^{2}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [2, -3]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(0, 2.5), (-2, -4)]
self.global_optimum = [[2.0, -3.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
val = x[1] ** 4 + 12 * x[1] ** 3 + 54 * x[1] ** 2 + 108 * x[1] + 81.0
val2 = x[0] ** 10. - 20 * x[0] ** 9 + 180 * x[0] ** 8 - 960 * x[0] ** 7
val2 += 3360 * x[0] ** 6 - 8064 * x[0] ** 5 + 13340 * x[0] ** 4
val2 += - 15360 * x[0] ** 3 + 11520 * x[0] ** 2 - 5120 * x[0] + 2624
return 0.001 * (abs(val) + abs(val2)) ** 2.
class Deceptive(Benchmark):
r"""
Deceptive objective function.
This class defines the Deceptive [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Deceptive}}(x) = - \left [\frac{1}{n}
\sum_{i=1}^{n} g_i(x_i) \right ]^{\beta}
Where :math:`\beta` is a fixed non-linearity factor; in this exercise,
:math:`\beta = 2`. The function :math:`g_i(x_i)` is given by:
.. math::
g_i(x_i) = \begin{cases}
- \frac{x}{\alpha_i} + \frac{4}{5} &
\textrm{if} \hspace{5pt} 0 \leq x_i \leq \frac{4}{5} \alpha_i \\
\frac{5x}{\alpha_i} -4 &
\textrm{if} \hspace{5pt} \frac{4}{5} \alpha_i \le x_i \leq \alpha_i \\
\frac{5(x - \alpha_i)}{\alpha_i-1} &
\textrm{if} \hspace{5pt} \alpha_i \le x_i \leq \frac{1 + 4\alpha_i}{5} \\
\frac{x - 1}{1 - \alpha_i} &
\textrm{if} \hspace{5pt} \frac{1 + 4\alpha_i}{5} \le x_i \leq 1
\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -1` for :math:`x_i = \alpha_i` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: this function was taken from the Gavana website. The following code
is based on his code. His code and the website don't match, the equations
are wrong.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0)
self.global_optimum = [alpha]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0)
beta = 2.0
g = zeros((self.N, ))
for i in range(self.N):
if x[i] <= 0.0:
g[i] = x[i]
elif x[i] < 0.8 * alpha[i]:
g[i] = -x[i] / alpha[i] + 0.8
elif x[i] < alpha[i]:
g[i] = 5.0 * x[i] / alpha[i] - 4.0
elif x[i] < (1.0 + 4 * alpha[i]) / 5.0:
g[i] = 5.0 * (x[i] - alpha[i]) / (alpha[i] - 1.0) + 1.0
elif x[i] <= 1.0:
g[i] = (x[i] - 1.0) / (1.0 - alpha[i]) + 4.0 / 5.0
else:
g[i] = x[i] - 1.0
return -((1.0 / self.N) * sum(g)) ** beta
class DeckkersAarts(Benchmark):
r"""
Deckkers-Aarts objective function.
This class defines the Deckkers-Aarts [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{DeckkersAarts}}(x) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2
+ 10^{-5}(x_1^2 + x_2^2)^4
with :math:`x_i \in [-20, 20]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -24776.518242168` for
:math:`x = [0, \pm 14.9451209]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: jamil solution and global minimum are slightly wrong.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))
self.custom_bounds = ([-1, 1], [14, 16])
self.global_optimum = [[0.0, 14.9451209]]
self.fglob = -24776.518342168
def fun(self, x, *args):
self.nfev += 1
return (1.e5 * x[0] ** 2 + x[1] ** 2 - (x[0] ** 2 + x[1] ** 2) ** 2
+ 1.e-5 * (x[0] ** 2 + x[1] ** 2) ** 4)
class DeflectedCorrugatedSpring(Benchmark):
r"""
DeflectedCorrugatedSpring objective function.
This class defines the Deflected Corrugated Spring [1]_ function global
optimization problem. This is a multimodal minimization problem defined as
follows:
.. math::
f_{\text{DeflectedCorrugatedSpring}}(x) = 0.1\sum_{i=1}^n \left[ (x_i -
\alpha)^2 - \cos \left( K \sqrt {\sum_{i=1}^n (x_i - \alpha)^2}
\right ) \right ]
Where, in this exercise, :math:`K = 5` and :math:`\alpha = 5`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 2\alpha]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -1` for :math:`x_i = \alpha` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: website has a different equation to the gavana codebase. The function
below is different to the equation above. Also, the global minimum is
wrong.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
alpha = 5.0
self._bounds = list(zip([0] * self.N, [2 * alpha] * self.N))
self.global_optimum = [[alpha for _ in range(self.N)]]
self.fglob = -1.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
K, alpha = 5.0, 5.0
return (-cos(K * sqrt(sum((x - alpha) ** 2)))
+ 0.1 * sum((x - alpha) ** 2))
class DeVilliersGlasser01(Benchmark):
r"""
DeVilliers-Glasser 1 objective function.
This class defines the DeVilliers-Glasser 1 [1]_ function global optimization
problem. This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{DeVilliersGlasser01}}(x) = \sum_{i=1}^{24} \left[ x_1x_2^{t_i}
\sin(x_3t_i + x_4) - y_i \right ]^2
Where, in this exercise, :math:`t_i = 0.1(i - 1)` and
:math:`y_i = 60.137(1.371^{t_i}) \sin(3.112t_i + 1.761)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[1, 100]` for :math:`i = 1, ..., 4`.
    *Global optimum*: :math:`f(x) = 0` for
    :math:`x = [60.137, 1.371, 3.112, 1.761]`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([1.0] * self.N, [100.0] * self.N))
self.global_optimum = [[60.137, 1.371, 3.112, 1.761]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
t = 0.1 * arange(24)
y = 60.137 * (1.371 ** t) * sin(3.112 * t + 1.761)
return sum((x[0] * (x[1] ** t) * sin(x[2] * t + x[3]) - y) ** 2.0)
class DeVilliersGlasser02(Benchmark):
r"""
DeVilliers-Glasser 2 objective function.
This class defines the DeVilliers-Glasser 2 [1]_ function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\text{DeVilliersGlasser02}}(x) = \sum_{i=1}^{24} \left[ x_1x_2^{t_i}
\tanh \left [x_3t_i + \sin(x_4t_i) \right] \cos(t_ie^{x_5}) -
y_i \right ]^2
Where, in this exercise, :math:`t_i = 0.1(i - 1)` and
:math:`y_i = 53.81(1.27^{t_i}) \tanh (3.012t_i + \sin(2.13t_i))
\cos(e^{0.507}t_i)`.
with :math:`x_i \in [1, 60]` for :math:`i = 1, ..., 5`.
*Global optimum*: :math:`f(x) = 0` for
:math:`x = [53.81, 1.27, 3.012, 2.13, 0.507]`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([1.0] * self.N, [60.0] * self.N))
self.global_optimum = [[53.81, 1.27, 3.012, 2.13, 0.507]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
t = 0.1 * arange(16)
y = (53.81 * 1.27 ** t * tanh(3.012 * t + sin(2.13 * t))
* cos(exp(0.507) * t))
return sum((x[0] * (x[1] ** t) * tanh(x[2] * t + sin(x[3] * t))
* cos(t * exp(x[4])) - y) ** 2.0)
class DixonPrice(Benchmark):
r"""
Dixon and Price objective function.
This class defines the Dixon and Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{DixonPrice}}(x) = (x_i - 1)^2
+ \sum_{i=2}^n i(2x_i^2 - x_{i-1})^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x_i) = 0` for
:math:`x_i = 2^{- \frac{(2^i - 2)}{2^i}}` for :math:`i = 1, ..., n`
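    For instance, with :math:`n = 2` this gives :math:`x = (1, 2^{-1/2})
    \approx (1, 0.7071)`, where both :math:`(x_1 - 1)^2` and
    :math:`2(2 x_2^2 - x_1)^2` vanish.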
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: Gavana code not correct. i array should start from 2.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-2, 3), (-2, 3)]
self.global_optimum = [[2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i)
for i in range(1, self.N + 1)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
i = arange(2, self.N + 1)
s = i * (2.0 * x[1:] ** 2.0 - x[:-1]) ** 2.0
return sum(s) + (x[0] - 1.0) ** 2.0
class Dolan(Benchmark):
r"""
Dolan objective function.
This class defines the Dolan [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Dolan}}(x) = \lvert (x_1 + 1.7 x_2)\sin(x_1) - 1.5 x_3
        - 0.1 x_4\cos(x_4 + x_5 - x_1) + 0.2 x_5^2 - x_2 - 1 \rvert
with :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., 5`.
*Global optimum*: :math:`f(x_i) = 10^{-5}` for
:math:`x = [8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO Jamil equation is missing the absolute brackets around the entire
expression.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.global_optimum = [[-74.10522498, 44.33511286, 6.21069214,
18.42772233, -16.5839403]]
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
return (abs((x[0] + 1.7 * x[1]) * sin(x[0]) - 1.5 * x[2]
- 0.1 * x[3] * cos(x[3] + x[4] - x[0]) + 0.2 * x[4] ** 2
- x[1] - 1))
class DropWave(Benchmark):
r"""
DropWave objective function.
This class defines the DropWave [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{DropWave}}(x) = - \frac{1 + \cos\left(12 \sqrt{\sum_{i=1}^{n}
x_i^{2}}\right)}{2 + 0.5 \sum_{i=1}^{n} x_i^{2}}
with :math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -1` for :math:`x = [0, 0]`
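    At the optimum the numerator is :math:`1 + \cos(0) = 2` and the
    denominator is :math:`2 + 0.5 \cdot 0 = 2`, so :math:`f([0, 0]) = -2/2 = -1`.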
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
norm_x = sum(x ** 2)
return -(1 + cos(12 * sqrt(norm_x))) / (0.5 * norm_x + 2)
| bsd-3-clause | 2fdb4e90e01892bc83655ec2c90632d7 | 30.408451 | 95 | 0.539686 | 2.840312 | false | false | false | false |
scipy/scipy | scipy/stats/_kde.py | 8 | 24412 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to SciPy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
# Standard library imports.
import warnings
# SciPy imports.
from scipy import linalg, special
from scipy._lib._util import check_random_state
from numpy import (asarray, atleast_2d, reshape, zeros, newaxis, exp, pi,
sqrt, ravel, power, atleast_1d, squeeze, sum, transpose,
ones, cov)
import numpy as np
# Local imports.
from . import _mvn
from ._stats import gaussian_kernel_estimate, gaussian_kernel_estimate_log
__all__ = ['gaussian_kde']
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`. The square
of `kde.factor` multiplies the covariance matrix of the data in the kde
estimation.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
`gaussian_kde` does not currently support data that lies in a
lower-dimensional subspace of the space in which it is expressed. For such
    data, consider performing principal component analysis / dimensionality
reduction and using `gaussian_kde` with the transformed data.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
Examples
--------
Generate some random two-dimensional data:
>>> import numpy as np
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None, weights=None):
self.dataset = atleast_2d(asarray(dataset))
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
if weights is not None:
self._weights = atleast_1d(weights).astype(float)
self._weights /= sum(self._weights)
if self.weights.ndim != 1:
raise ValueError("`weights` input should be one-dimensional.")
if len(self._weights) != self.n:
raise ValueError("`weights` input should be of length n")
self._neff = 1/sum(self._weights**2)
try:
self.set_bandwidth(bw_method=bw_method)
except linalg.LinAlgError as e:
            msg = ("The data appears to lie in a lower-dimensional subspace "
                   "of the space in which it is expressed. This has resulted "
                   "in a singular data covariance matrix, which cannot be "
                   "treated using the algorithms implemented in "
                   "`gaussian_kde`. Consider performing principal component "
                   "analysis / dimensionality reduction and using "
                   "`gaussian_kde` with the transformed data.")
raise linalg.LinAlgError(msg) from e
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(asarray(points))
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
output_dtype, spec = _get_output_dtype(self.covariance, points)
result = gaussian_kernel_estimate[spec](
self.dataset.T, self.weights[:, None],
points.T, self.cho_cov, output_dtype)
return result[:, 0]
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies)*self.weights, axis=0) / norm_const
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.sum(self.weights*(
special.ndtr(normalized_high) -
special.ndtr(normalized_low)))
return value
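    # Hedged usage sketch for `integrate_box_1d` (1-D KDEs only); the numeric
    # result is approximate and depends on the sample:
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> rng = np.random.default_rng(12345)
    # >>> kde = stats.gaussian_kde(rng.standard_normal(1000))
    # >>> kde.integrate_box_1d(-1, 1)  # mass between -1 and 1, roughly 0.68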
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = _mvn.mvnun_weighted(low_bounds, high_bounds,
self.dataset, self.weights,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in _mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies)*large.weights, axis=0)*small.weights[i]
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const
return result
def resample(self, size=None, seed=None):
"""Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the effective number of samples in the underlying
dataset.
seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = int(self.neff)
random_state = check_random_state(seed)
norm = transpose(random_state.multivariate_normal(
zeros((self.d,), float), self.covariance, size=size
))
indices = random_state.choice(self.n, size=size, p=self.weights)
means = self.dataset[:, indices]
return means + norm
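    # Hedged usage sketch for `resample`: draw new points from the fitted KDE.
    # The returned array has shape (d, size), matching the layout of `dataset`.
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> rng = np.random.default_rng(0)
    # >>> values = np.vstack([rng.normal(size=200), rng.normal(size=200)])
    # >>> kde = stats.gaussian_kde(values)
    # >>> kde.resample(size=500, seed=42).shape
    # (2, 500)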
def scotts_factor(self):
"""Compute Scott's factor.
Returns
-------
s : float
Scott's factor.
"""
return power(self.neff, -1./(self.d+4))
def silverman_factor(self):
"""Compute the Silverman factor.
Returns
-------
s : float
The silverman factor.
"""
return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import numpy as np
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.full(x1.shape, 1 / (4. * x1.size)), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, str):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and Cholesky decomp of covariance
if not hasattr(self, '_data_cho_cov'):
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False,
aweights=self.weights))
self._data_cho_cov = linalg.cholesky(self._data_covariance,
lower=True)
self.covariance = self._data_covariance * self.factor**2
self.cho_cov = (self._data_cho_cov * self.factor).astype(np.float64)
self.log_det = 2*np.log(np.diag(self.cho_cov
* np.sqrt(2*pi))).sum()
@property
def inv_cov(self):
# Re-compute from scratch each time because I'm not sure how this is
# used in the wild. (Perhaps users change the `dataset`, since it's
# not a private attribute?) `_compute_covariance` used to recalculate
        # all these, so we'll recalculate everything now that this is a
        # property.
self.factor = self.covariance_factor()
self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
bias=False, aweights=self.weights))
return linalg.inv(self._data_covariance) / self.factor**2
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = (f"points have dimension {d}, "
f"dataset has dimension {self.d}")
raise ValueError(msg)
output_dtype, spec = _get_output_dtype(self.covariance, points)
result = gaussian_kernel_estimate_log[spec](
self.dataset.T, self.weights[:, None],
points.T, self.cho_cov, output_dtype)
return result[:, 0]
def marginal(self, dimensions):
"""Return a marginal KDE distribution
Parameters
----------
dimensions : int or 1-d array_like
The dimensions of the multivariate distribution corresponding
with the marginal variables, that is, the indices of the dimensions
that are being retained. The other dimensions are marginalized out.
Returns
-------
marginal_kde : gaussian_kde
An object representing the marginal distribution.
Notes
-----
.. versionadded:: 1.10.0
"""
dims = np.atleast_1d(dimensions)
if not np.issubdtype(dims.dtype, np.integer):
msg = ("Elements of `dimensions` must be integers - the indices "
"of the marginal variables being retained.")
raise ValueError(msg)
n = len(self.dataset) # number of dimensions
original_dims = dims.copy()
dims[dims < 0] = n + dims[dims < 0]
if len(np.unique(dims)) != len(dims):
msg = ("All elements of `dimensions` must be unique.")
raise ValueError(msg)
i_invalid = (dims < 0) | (dims >= n)
if np.any(i_invalid):
msg = (f"Dimensions {original_dims[i_invalid]} are invalid "
f"for a distribution in {n} dimensions.")
raise ValueError(msg)
dataset = self.dataset[dims]
weights = self.weights
return gaussian_kde(dataset, bw_method=self.covariance_factor(),
weights=weights)
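    # Hedged usage sketch for `marginal` (SciPy >= 1.10): keep dimensions 0 and 1
    # of a 3-D KDE and marginalize out dimension 2.
    # >>> import numpy as np
    # >>> from scipy import stats
    # >>> rng = np.random.default_rng(7)
    # >>> values = rng.normal(size=(3, 500))  # 3-D dataset with 500 points
    # >>> kde3 = stats.gaussian_kde(values)
    # >>> kde01 = kde3.marginal([0, 1])
    # >>> kde01.d
    # 2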
@property
def weights(self):
try:
return self._weights
except AttributeError:
self._weights = ones(self.n)/self.n
return self._weights
@property
def neff(self):
try:
return self._neff
except AttributeError:
self._neff = 1/sum(self.weights**2)
return self._neff
def _get_output_dtype(covariance, points):
"""
Calculates the output dtype and the "spec" (=C type name).
This was necessary in order to deal with the fused types in the Cython
routine `gaussian_kernel_estimate`. See gh-10824 for details.
"""
output_dtype = np.common_type(covariance, points)
itemsize = np.dtype(output_dtype).itemsize
if itemsize == 4:
spec = 'float'
elif itemsize == 8:
spec = 'double'
elif itemsize in (12, 16):
spec = 'long double'
else:
raise ValueError(
f"{output_dtype} has unexpected item size: {itemsize}"
)
return output_dtype, spec
| bsd-3-clause | a79965a7b1b7d12290c3fbcf871f590e | 33.142657 | 90 | 0.56677 | 4.153113 | false | false | false | false |
scipy/scipy | scipy/optimize/_milp.py | 1 | 14580 | import warnings
import numpy as np
from scipy.sparse import csc_array, vstack
from ._highs._highs_wrapper import _highs_wrapper # type: ignore[import]
from ._constraints import LinearConstraint, Bounds
from ._optimize import OptimizeResult
from ._linprog_highs import _highs_to_scipy_status_message
def _constraints_to_components(constraints):
"""
Convert sequence of constraints to a single set of components A, b_l, b_u.
`constraints` could be
1. A LinearConstraint
2. A tuple representing a LinearConstraint
3. An invalid object
4. A sequence of composed entirely of objects of type 1/2
5. A sequence containing at least one object of type 3
We want to accept 1, 2, and 4 and reject 3 and 5.
"""
message = ("`constraints` (or each element within `constraints`) must be "
"convertible into an instance of "
"`scipy.optimize.LinearConstraint`.")
As = []
b_ls = []
b_us = []
# Accept case 1 by standardizing as case 4
if isinstance(constraints, LinearConstraint):
constraints = [constraints]
else:
# Reject case 3
try:
iter(constraints)
except TypeError as exc:
raise ValueError(message) from exc
# Accept case 2 by standardizing as case 4
if len(constraints) == 3:
# argument could be a single tuple representing a LinearConstraint
try:
constraints = [LinearConstraint(*constraints)]
except (TypeError, ValueError, np.VisibleDeprecationWarning):
# argument was not a tuple representing a LinearConstraint
pass
# Address cases 4/5
for constraint in constraints:
# if it's not a LinearConstraint or something that represents a
# LinearConstraint at this point, it's invalid
if not isinstance(constraint, LinearConstraint):
try:
constraint = LinearConstraint(*constraint)
except TypeError as exc:
raise ValueError(message) from exc
As.append(csc_array(constraint.A))
b_ls.append(np.atleast_1d(constraint.lb).astype(np.double))
b_us.append(np.atleast_1d(constraint.ub).astype(np.double))
if len(As) > 1:
A = vstack(As)
b_l = np.concatenate(b_ls)
b_u = np.concatenate(b_us)
else: # avoid unnecessary copying
A = As[0]
b_l = b_ls[0]
b_u = b_us[0]
return A, b_l, b_u
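# Hedged illustration of the accepted `constraints` forms handled above
# (cases 1, 2 and 4); the arrays are small illustrative values:
# >>> import numpy as np
# >>> from scipy.optimize import LinearConstraint
# >>> A = np.array([[1.0, 1.0]]); b_l = np.array([0.0]); b_u = np.array([2.0])
# >>> lc = LinearConstraint(A, b_l, b_u)      # case 1: a LinearConstraint
# >>> as_tuple = (A, b_l, b_u)                # case 2: a tuple representing one
# >>> as_sequence = [lc, (A, b_l, b_u)]       # case 4: a sequence of types 1/2
# Any of these three is acceptable as the `constraints` argument of `milp`.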
def _milp_iv(c, integrality, bounds, constraints, options):
# objective IV
c = np.atleast_1d(c).astype(np.double)
if c.ndim != 1 or c.size == 0 or not np.all(np.isfinite(c)):
message = ("`c` must be a one-dimensional array of finite numbers "
"with at least one element.")
raise ValueError(message)
# integrality IV
message = ("`integrality` must contain integers 0-3 and be broadcastable "
"to `c.shape`.")
if integrality is None:
integrality = 0
try:
integrality = np.broadcast_to(integrality, c.shape).astype(np.uint8)
except ValueError:
raise ValueError(message)
if integrality.min() < 0 or integrality.max() > 3:
raise ValueError(message)
# bounds IV
if bounds is None:
bounds = Bounds(0, np.inf)
elif not isinstance(bounds, Bounds):
message = ("`bounds` must be convertible into an instance of "
"`scipy.optimize.Bounds`.")
try:
bounds = Bounds(*bounds)
except TypeError as exc:
raise ValueError(message) from exc
try:
lb = np.broadcast_to(bounds.lb, c.shape).astype(np.double)
ub = np.broadcast_to(bounds.ub, c.shape).astype(np.double)
except (ValueError, TypeError) as exc:
message = ("`bounds.lb` and `bounds.ub` must contain reals and "
"be broadcastable to `c.shape`.")
raise ValueError(message) from exc
# constraints IV
if not constraints:
constraints = [LinearConstraint(np.empty((0, c.size)),
np.empty((0,)), np.empty((0,)))]
try:
A, b_l, b_u = _constraints_to_components(constraints)
except ValueError as exc:
message = ("`constraints` (or each element within `constraints`) must "
"be convertible into an instance of "
"`scipy.optimize.LinearConstraint`.")
raise ValueError(message) from exc
if A.shape != (b_l.size, c.size):
message = "The shape of `A` must be (len(b_l), len(c))."
raise ValueError(message)
indptr, indices, data = A.indptr, A.indices, A.data.astype(np.double)
# options IV
options = options or {}
supported_options = {'disp', 'presolve', 'time_limit', 'node_limit'}
unsupported_options = set(options).difference(supported_options)
if unsupported_options:
message = (f"Unrecognized options detected: {unsupported_options}. "
"These will be passed to HiGHS verbatim.")
warnings.warn(message, RuntimeWarning, stacklevel=3)
options_iv = {'log_to_console': options.pop("disp", False),
'mip_max_nodes': options.pop("node_limit", None)}
options_iv.update(options)
return c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options_iv
def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):
r"""
Mixed-integer linear programming
Solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & b_l \leq A x \leq b_u,\\
& l \leq x \leq u, \\
& x_i \in \mathbb{Z}, i \in X_i
where :math:`x` is a vector of decision variables;
:math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors;
:math:`A` is a matrix, and :math:`X_i` is the set of indices of
decision variables that must be integral. (In this context, a
variable that can assume only integer values is said to be "integral";
it has an "integrality" constraint.)
Alternatively, that's:
minimize::
c @ x
such that::
b_l <= A @ x <= b_u
l <= x <= u
Specified elements of x must be integers
By default, ``l = 0`` and ``u = np.inf`` unless specified with
``bounds``.
Parameters
----------
c : 1D array_like
The coefficients of the linear objective function to be minimized.
`c` is converted to a double precision array before the problem is
solved.
integrality : 1D array_like, optional
Indicates the type of integrality constraint on each decision variable.
``0`` : Continuous variable; no integrality constraint.
``1`` : Integer variable; decision variable must be an integer
within `bounds`.
``2`` : Semi-continuous variable; decision variable must be within
`bounds` or take value ``0``.
``3`` : Semi-integer variable; decision variable must be an integer
within `bounds` or take value ``0``.
By default, all variables are continuous. `integrality` is converted
to an array of integers before the problem is solved.
bounds : scipy.optimize.Bounds, optional
Bounds on the decision variables. Lower and upper bounds are converted
to double precision arrays before the problem is solved. The
``keep_feasible`` parameter of the `Bounds` object is ignored. If
not specified, all decision variables are constrained to be
non-negative.
constraints : sequence of scipy.optimize.LinearConstraint, optional
Linear constraints of the optimization problem. Arguments may be
one of the following:
1. A single `LinearConstraint` object
2. A single tuple that can be converted to a `LinearConstraint` object
as ``LinearConstraint(*constraints)``
3. A sequence composed entirely of objects of type 1. and 2.
Before the problem is solved, all values are converted to double
precision, and the matrices of constraint coefficients are converted to
instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter
of `LinearConstraint` objects is ignored.
options : dict, optional
A dictionary of solver options. The following keys are recognized.
disp : bool (default: ``False``)
Set to ``True`` if indicators of optimization status are to be
printed to the console during optimization.
node_limit : int, optional
The maximum number of nodes (linear program relaxations) to solve
before stopping. Default is no maximum number of nodes.
presolve : bool (default: ``True``)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver.
time_limit : float, optional
The maximum number of seconds allotted to solve the problem.
Default is no time limit.
Returns
-------
res : OptimizeResult
An instance of :class:`scipy.optimize.OptimizeResult`. The object
is guaranteed to have the following attributes.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimal solution found.
``1`` : Iteration or time limit reached.
``2`` : Problem is infeasible.
``3`` : Problem is unbounded.
``4`` : Other; see message for details.
success : bool
``True`` when an optimal solution is found and ``False`` otherwise.
message : str
A string descriptor of the exit status of the algorithm.
The following attributes will also be present, but the values may be
``None``, depending on the solution status.
x : ndarray
The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
mip_node_count : int
The number of subproblems or "nodes" solved by the MILP solver.
mip_dual_bound : float
The MILP solver's final estimate of the lower bound on the optimal
solution.
mip_gap : float
The difference between the final objective function value and the
final dual bound.
Notes
-----
`milp` is a wrapper of the HiGHS linear optimization software [1]_. The
algorithm is deterministic, and it typically finds the global optimum of
moderately challenging mixed-integer linear programs (when it exists).
References
----------
.. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
"HiGHS - high performance software for linear optimization."
https://highs.dev/
.. [2] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
simplex method." Mathematical Programming Computation, 10 (1),
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
Examples
--------
Consider the problem at
https://en.wikipedia.org/wiki/Integer_programming#Example, which is
expressed as a maximization problem of two variables. Since `milp` requires
that the problem be expressed as a minimization problem, the objective
function coefficients on the decision variables are:
>>> import numpy as np
>>> c = -np.array([0, 1])
Note the negative sign: we maximize the original objective function
by minimizing the negative of the objective function.
We collect the coefficients of the constraints into arrays like:
>>> A = np.array([[-1, 1], [3, 2], [2, 3]])
>>> b_u = np.array([1, 12, 12])
>>> b_l = np.full_like(b_u, -np.inf)
Because there is no lower limit on these constraints, we have defined a
variable ``b_l`` full of values representing negative infinity. This may
be unfamiliar to users of `scipy.optimize.linprog`, which only accepts
"less than" (or "upper bound") inequality constraints of the form
``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints
``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify "greater than"
inequality constraints, "less than" inequality constraints, and equality
constraints concisely.
These arrays are collected into a single `LinearConstraint` object like:
>>> from scipy.optimize import LinearConstraint
>>> constraints = LinearConstraint(A, b_l, b_u)
The non-negativity bounds on the decision variables are enforced by
default, so we do not need to provide an argument for `bounds`.
Finally, the problem states that both decision variables must be integers:
>>> integrality = np.ones_like(c)
We solve the problem like:
>>> from scipy.optimize import milp
>>> res = milp(c=c, constraints=constraints, integrality=integrality)
>>> res.x
[1.0, 2.0]
Note that had we solved the relaxed problem (without integrality
constraints):
>>> res = milp(c=c, constraints=constraints) # OR:
>>> # from scipy.optimize import linprog; res = linprog(c, A, b_u)
>>> res.x
[1.8, 2.8]
we would not have obtained the correct solution by rounding to the nearest
integers.
Other examples are given :ref:`in the tutorial <tutorial-optimize_milp>`.
"""
args_iv = _milp_iv(c, integrality, bounds, constraints, options)
c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv
highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u,
lb, ub, integrality, options)
res = {}
# Convert to scipy-style status and message
highs_status = highs_res.get('status', None)
highs_message = highs_res.get('message', None)
status, message = _highs_to_scipy_status_message(highs_status,
highs_message)
res['status'] = status
res['message'] = message
res['success'] = (status == 0)
x = highs_res.get('x', None)
res['x'] = np.array(x) if x is not None else None
res['fun'] = highs_res.get('fun', None)
res['mip_node_count'] = highs_res.get('mip_node_count', None)
res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None)
res['mip_gap'] = highs_res.get('mip_gap', None)
return OptimizeResult(res)
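# A hedged follow-up sketch (output not verified against a specific HiGHS
# version): an equality constraint such as x0 + x1 == 3 is expressed by giving
# a LinearConstraint equal lower and upper bounds.
# >>> import numpy as np
# >>> from scipy.optimize import milp, LinearConstraint
# >>> res = milp(c=[1, 2], integrality=[1, 1],
# ...            constraints=LinearConstraint([[1, 1]], 3, 3))
# >>> res.x  # doctest: +SKIP
# array([3., 0.])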
| bsd-3-clause | d0ea3f944233ffc311879ad28874a381 | 37.167539 | 79 | 0.627778 | 4.05902 | false | false | false | false |
scipy/scipy | scipy/integrate/_ivp/base.py | 20 | 9550 | import numpy as np
def check_arguments(fun, y0, support_complex):
"""Helper function for checking arguments common to all solvers."""
y0 = np.asarray(y0)
if np.issubdtype(y0.dtype, np.complexfloating):
if not support_complex:
raise ValueError("`y0` is complex, but the chosen solver does "
"not support integration in a complex domain.")
dtype = complex
else:
dtype = float
y0 = y0.astype(dtype, copy=False)
if y0.ndim != 1:
raise ValueError("`y0` must be 1-dimensional.")
def fun_wrapped(t, y):
return np.asarray(fun(t, y), dtype=dtype)
return fun_wrapped, y0
class OdeSolver:
"""Base class for ODE solvers.
In order to implement a new solver you need to follow the guidelines:
1. A constructor must accept parameters presented in the base class
(listed below) along with any other parameters specific to a solver.
2. A constructor must accept arbitrary extraneous arguments
``**extraneous``, but warn that these arguments are irrelevant
using `common.warn_extraneous` function. Do not pass these
arguments to the base class.
3. A solver must implement a private method `_step_impl(self)` which
propagates a solver one step further. It must return tuple
``(success, message)``, where ``success`` is a boolean indicating
whether a step was successful, and ``message`` is a string
containing description of a failure if a step failed or None
otherwise.
4. A solver must implement a private method `_dense_output_impl(self)`,
which returns a `DenseOutput` object covering the last successful
step.
5. A solver must have attributes listed below in Attributes section.
Note that ``t_old`` and ``step_size`` are updated automatically.
6. Use `fun(self, t, y)` method for the system rhs evaluation, this
way the number of function evaluations (`nfev`) will be tracked
automatically.
7. For convenience, a base class provides `fun_single(self, t, y)` and
`fun_vectorized(self, t, y)` for evaluating the rhs in
non-vectorized and vectorized fashions respectively (regardless of
how `fun` from the constructor is implemented). These calls don't
increment `nfev`.
8. If a solver uses a Jacobian matrix and LU decompositions, it should
track the number of Jacobian evaluations (`njev`) and the number of
LU decompositions (`nlu`).
9. By convention, the function evaluations used to compute a finite
difference approximation of the Jacobian should not be counted in
`nfev`, thus use `fun_single(self, t, y)` or
`fun_vectorized(self, t, y)` when computing a finite difference
approximation of the Jacobian.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar and there are two options for ndarray ``y``.
It can either have shape (n,), then ``fun`` must return array_like with
shape (n,). Or, alternatively, it can have shape (n, n_points), then
``fun`` must return array_like with shape (n, n_points) (each column
corresponds to a single column in ``y``). The choice between the two
options is determined by `vectorized` argument (see below).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time --- the integration won't continue beyond it. It also
determines the direction of the integration.
vectorized : bool
Whether `fun` is implemented in a vectorized fashion.
support_complex : bool, optional
Whether integration in a complex domain should be supported.
Generally determined by a derived solver class capabilities.
Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of the system's rhs evaluations.
njev : int
Number of the Jacobian evaluations.
nlu : int
Number of LU decompositions.
"""
TOO_SMALL_STEP = "Required step size is less than spacing between numbers."
def __init__(self, fun, t0, y0, t_bound, vectorized,
support_complex=False):
self.t_old = None
self.t = t0
self._fun, self.y = check_arguments(fun, y0, support_complex)
self.t_bound = t_bound
self.vectorized = vectorized
if vectorized:
def fun_single(t, y):
return self._fun(t, y[:, None]).ravel()
fun_vectorized = self._fun
else:
fun_single = self._fun
def fun_vectorized(t, y):
f = np.empty_like(y)
for i, yi in enumerate(y.T):
f[:, i] = self._fun(t, yi)
return f
def fun(t, y):
self.nfev += 1
return self.fun_single(t, y)
self.fun = fun
self.fun_single = fun_single
self.fun_vectorized = fun_vectorized
self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
self.n = self.y.size
self.status = 'running'
self.nfev = 0
self.njev = 0
self.nlu = 0
@property
def step_size(self):
if self.t_old is None:
return None
else:
return np.abs(self.t - self.t_old)
def step(self):
"""Perform one integration step.
Returns
-------
message : string or None
Report from the solver. Typically a reason for a failure if
`self.status` is 'failed' after the step was taken or None
otherwise.
"""
if self.status != 'running':
raise RuntimeError("Attempt to step on a failed or finished "
"solver.")
if self.n == 0 or self.t == self.t_bound:
# Handle corner cases of empty solver or no integration.
self.t_old = self.t
self.t = self.t_bound
message = None
self.status = 'finished'
else:
t = self.t
success, message = self._step_impl()
if not success:
self.status = 'failed'
else:
self.t_old = t
if self.direction * (self.t - self.t_bound) >= 0:
self.status = 'finished'
return message
def dense_output(self):
"""Compute a local interpolant over the last successful step.
Returns
-------
sol : `DenseOutput`
Local interpolant over the last successful step.
"""
if self.t_old is None:
raise RuntimeError("Dense output is available after a successful "
"step was made.")
if self.n == 0 or self.t == self.t_old:
# Handle corner cases of empty solver and no integration.
return ConstantDenseOutput(self.t_old, self.t, self.y)
else:
return self._dense_output_impl()
def _step_impl(self):
raise NotImplementedError
def _dense_output_impl(self):
raise NotImplementedError
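# A minimal, hedged sketch (not part of SciPy) of a solver following the
# guidelines above: a fixed-step forward-Euler method. The class name and the
# `max_step` parameter are illustrative assumptions; `warn_extraneous` is the
# helper mentioned in guideline 2, and `ConstantDenseOutput` is defined later
# in this module.
#
# from .common import warn_extraneous
#
# class Euler(OdeSolver):
#     def __init__(self, fun, t0, y0, t_bound, max_step=1e-3, vectorized=False,
#                  **extraneous):
#         warn_extraneous(extraneous)                     # guideline 2
#         super().__init__(fun, t0, y0, t_bound, vectorized)
#         self.max_step = max_step
#
#     def _step_impl(self):                               # guideline 3
#         h = self.direction * min(self.max_step, abs(self.t_bound - self.t))
#         self.y = self.y + h * self.fun(self.t, self.y)  # self.fun tracks nfev
#         self.t = self.t + h
#         return True, None
#
#     def _dense_output_impl(self):                       # guideline 4
#         return ConstantDenseOutput(self.t_old, self.t, self.y)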
class DenseOutput:
"""Base class for local interpolant over step made by an ODE solver.
It interpolates between `t_min` and `t_max` (see Attributes below).
Evaluation outside this interval is not forbidden, but the accuracy is not
guaranteed.
Attributes
----------
t_min, t_max : float
Time range of the interpolation.
"""
def __init__(self, t_old, t):
self.t_old = t_old
self.t = t
self.t_min = min(t, t_old)
self.t_max = max(t, t_old)
def __call__(self, t):
"""Evaluate the interpolant.
Parameters
----------
t : float or array_like with shape (n_points,)
Points to evaluate the solution at.
Returns
-------
y : ndarray, shape (n,) or (n, n_points)
Computed values. Shape depends on whether `t` was a scalar or a
1-D array.
"""
t = np.asarray(t)
if t.ndim > 1:
raise ValueError("`t` must be a float or a 1-D array.")
return self._call_impl(t)
def _call_impl(self, t):
raise NotImplementedError
class ConstantDenseOutput(DenseOutput):
"""Constant value interpolator.
This class used for degenerate integration cases: equal integration limits
or a system with 0 equations.
"""
def __init__(self, t_old, t, value):
super().__init__(t_old, t)
self.value = value
def _call_impl(self, t):
if t.ndim == 0:
return self.value
else:
ret = np.empty((self.value.shape[0], t.shape[0]))
ret[:] = self.value[:, None]
return ret
| bsd-3-clause | 07a887cf5b40905406b8a9911edd07d7 | 33.854015 | 79 | 0.579372 | 4.26149 | false | false | false | false |
scipy/scipy | scipy/sparse/tests/test_sparsetools.py | 8 | 10441 | import sys
import os
import gc
import threading
import numpy as np
from numpy.testing import assert_equal, assert_, assert_allclose
from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix,
bsr_matrix, dia_matrix)
from scipy.sparse._sputils import supported_dtypes
from scipy._lib._testutils import check_free_memory
import pytest
from pytest import raises as assert_raises
def int_to_int8(n):
"""
Wrap an integer to the interval [-128, 127].
"""
return (n + 128) % 256 - 128
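# Hedged doctest-style check of the wrapping helper above:
# >>> int_to_int8(127), int_to_int8(128), int_to_int8(-129)
# (127, -128, 127)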
def test_exception():
assert_raises(MemoryError, _sparsetools.test_throw_error)
def test_threads():
# Smoke test for parallel threaded execution; doesn't actually
# check that code runs in parallel, but just that it produces
# expected results.
nthreads = 10
niter = 100
n = 20
a = csr_matrix(np.ones([n, n]))
bres = []
class Worker(threading.Thread):
def run(self):
b = a.copy()
for j in range(niter):
_sparsetools.csr_plus_csr(n, n,
a.indptr, a.indices, a.data,
a.indptr, a.indices, a.data,
b.indptr, b.indices, b.data)
bres.append(b)
threads = [Worker() for _ in range(nthreads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for b in bres:
assert_(np.all(b.toarray() == 2))
def test_regression_std_vector_dtypes():
# Regression test for gh-3780, checking the std::vector typemaps
# in sparsetools.cxx are complete.
for dtype in supported_dtypes:
ad = np.array([[1, 2], [3, 4]]).astype(dtype)
a = csr_matrix(ad, dtype=dtype)
# getcol is one function using std::vector typemaps, and should not fail
assert_equal(a.getcol(0).toarray(), ad[:, :1])
@pytest.mark.slow
@pytest.mark.xfail_on_32bit("Can't create large array for test")
def test_nnz_overflow():
# Regression test for gh-7230 / gh-7871, checking that coo_toarray
# with nnz > int32max doesn't overflow.
nnz = np.iinfo(np.int32).max + 1
# Ensure ~20 GB of RAM is free to run this test.
check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5)
# Use nnz duplicate entries to keep the dense version small.
row = np.zeros(nnz, dtype=np.int32)
col = np.zeros(nnz, dtype=np.int32)
data = np.zeros(nnz, dtype=np.int8)
data[-1] = 4
s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False)
# Sums nnz duplicates to produce a 1x1 array containing 4.
d = s.toarray()
assert_allclose(d, [[4]])
@pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8),
reason="test requires 64-bit Linux")
class TestInt32Overflow:
"""
Some of the sparsetools routines use dense 2D matrices whose
total size is not bounded by the nnz of the sparse matrix. These
routines used to suffer from int32 wraparounds; here, we try to
check that the wraparounds don't occur any more.
"""
# choose n large enough
n = 50000
def setup_method(self):
assert self.n**2 > np.iinfo(np.int32).max
# check there's enough memory even if everything is run at the
# same time
try:
parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1'))
except ValueError:
parallel_count = np.inf
check_free_memory(3000 * parallel_count)
def teardown_method(self):
gc.collect()
def test_coo_todense(self):
# Check *_todense routines (cf. gh-2179)
#
# All of them in the end call coo_matrix.todense
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
r = m.todense()
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
@pytest.mark.slow
def test_matvecs(self):
# Check *_matvecs routines
n = self.n
i = np.array([0, n-1])
j = np.array([0, n-1])
data = np.array([1, 2], dtype=np.int8)
m = coo_matrix((data, (i, j)))
b = np.ones((n, n), dtype=np.int8)
for sptype in (csr_matrix, csc_matrix, bsr_matrix):
m2 = sptype(m)
r = m2.dot(b)
assert_equal(r[0,0], 1)
assert_equal(r[-1,-1], 2)
del r
gc.collect()
del b
gc.collect()
@pytest.mark.slow
def test_dia_matvec(self):
# Check: huge dia_matrix _matvec
n = self.n
data = np.ones((n, n), dtype=np.int8)
offsets = np.arange(n)
m = dia_matrix((data, offsets), shape=(n, n))
v = np.ones(m.shape[1], dtype=np.int8)
r = m.dot(v)
assert_equal(r[0], int_to_int8(n))
del data, offsets, m, v, r
gc.collect()
_bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow),
pytest.param("matvecs", marks=pytest.mark.xslow),
"matvec",
"diagonal",
"sort_indices",
pytest.param("transpose", marks=pytest.mark.xslow)]
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_1_block(self, op):
# Check: huge bsr_matrix (1-block)
#
# The point here is that indices inside a block may overflow.
def get_matrix():
n = self.n
data = np.ones((1, n, n), dtype=np.int8)
indptr = np.array([0, 1], dtype=np.int32)
indices = np.array([0], dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
@pytest.mark.slow
@pytest.mark.parametrize("op", _bsr_ops)
def test_bsr_n_block(self, op):
# Check: huge bsr_matrix (n-block)
#
# The point here is that while indices within a block don't
        # overflow, accumulators across many blocks may.
def get_matrix():
n = self.n
data = np.ones((n, n, 1), dtype=np.int8)
indptr = np.array([0, n], dtype=np.int32)
indices = np.arange(n, dtype=np.int32)
m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
del data, indptr, indices
return m
gc.collect()
try:
getattr(self, "_check_bsr_" + op)(get_matrix)
finally:
gc.collect()
def _check_bsr_matvecs(self, m):
m = m()
n = self.n
# _matvecs
r = m.dot(np.ones((n, 2), dtype=np.int8))
assert_equal(r[0, 0], int_to_int8(n))
def _check_bsr_matvec(self, m):
m = m()
n = self.n
# _matvec
r = m.dot(np.ones((n,), dtype=np.int8))
assert_equal(r[0], int_to_int8(n))
def _check_bsr_diagonal(self, m):
m = m()
n = self.n
# _diagonal
r = m.diagonal()
assert_equal(r, np.ones(n))
def _check_bsr_sort_indices(self, m):
# _sort_indices
m = m()
m.sort_indices()
def _check_bsr_transpose(self, m):
# _transpose
m = m()
m.transpose()
def _check_bsr_matmat(self, m):
m = m()
n = self.n
# _bsr_matmat
m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2))
m.dot(m2) # shouldn't SIGSEGV
del m2
# _bsr_matmat
m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0]))
m2.dot(m) # shouldn't SIGSEGV
@pytest.mark.skip(reason="64-bit indices in sparse matrices not available")
def test_csr_matmat_int64_overflow():
n = 3037000500
assert n**2 > np.iinfo(np.int64).max
# the test would take crazy amounts of memory
check_free_memory(n * (8*2 + 1) * 3 / 1e6)
# int64 overflow
data = np.ones((n,), dtype=np.int8)
indptr = np.arange(n+1, dtype=np.int64)
indices = np.zeros(n, dtype=np.int64)
a = csr_matrix((data, indices, indptr))
b = a.T
assert_raises(RuntimeError, a.dot, b)
def test_upcast():
a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
b0 = np.array([256+1j, 2**32], dtype=complex)
for a_dtype in supported_dtypes:
for b_dtype in supported_dtypes:
msg = "(%r, %r)" % (a_dtype, b_dtype)
if np.issubdtype(a_dtype, np.complexfloating):
a = a0.copy().astype(a_dtype)
else:
a = a0.real.copy().astype(a_dtype)
if np.issubdtype(b_dtype, np.complexfloating):
b = b0.copy().astype(b_dtype)
else:
with np.errstate(invalid="ignore"):
# Casting a large value (2**32) to int8 causes a warning in
# numpy >1.23
b = b0.real.copy().astype(b_dtype)
if not (a_dtype == np.bool_ and b_dtype == np.bool_):
c = np.zeros((2,), dtype=np.bool_)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
if ((np.issubdtype(a_dtype, np.complexfloating) and
not np.issubdtype(b_dtype, np.complexfloating)) or
(not np.issubdtype(a_dtype, np.complexfloating) and
np.issubdtype(b_dtype, np.complexfloating))):
c = np.zeros((2,), dtype=np.float64)
assert_raises(ValueError, _sparsetools.csr_matvec,
2, 2, a.indptr, a.indices, a.data, b, c)
c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
_sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
def test_endianness():
d = np.ones((3,4))
offsets = [-1,0,1]
a = dia_matrix((d.astype('<f8'), offsets), (4, 4))
b = dia_matrix((d.astype('>f8'), offsets), (4, 4))
v = np.arange(4)
assert_allclose(a.dot(v), [1, 3, 6, 5])
assert_allclose(b.dot(v), [1, 3, 6, 5])
| bsd-3-clause | cfde609624509b5ea208d26ad9fad661 | 29.982196 | 95 | 0.544488 | 3.272015 | false | true | false | false |
scipy/scipy | scipy/stats/_warnings_errors.py | 10 | 1195 | # Warnings
class DegenerateDataWarning(RuntimeWarning):
"""Warns when data is degenerate and results may not be reliable."""
def __init__(self, msg=None):
if msg is None:
msg = ("Degenerate data encountered; results may not be reliable.")
self.args = (msg,)
class ConstantInputWarning(DegenerateDataWarning):
"""Warns when all values in data are exactly equal."""
def __init__(self, msg=None):
if msg is None:
msg = ("All values in data are exactly equal; "
"results may not be reliable.")
self.args = (msg,)
class NearConstantInputWarning(DegenerateDataWarning):
"""Warns when all values in data are nearly equal."""
def __init__(self, msg=None):
if msg is None:
msg = ("All values in data are nearly equal; "
"results may not be reliable.")
self.args = (msg,)
# Errors
class FitError(RuntimeError):
"""Represents an error condition when fitting a distribution to data."""
def __init__(self, msg=None):
if msg is None:
            msg = ("An error occurred when fitting a distribution to data.")
self.args = (msg,)
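# Hedged usage sketch (standalone, assuming the classes above are in scope):
# the default messages defined above are used when no `msg` is supplied.
# >>> import warnings
# >>> warnings.warn(ConstantInputWarning())  # doctest: +SKIP
# >>> try:
# ...     raise FitError()
# ... except FitError as err:
# ...     print(err)
# An error occurred when fitting a distribution to data.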
| bsd-3-clause | fef62d5d8cb4b445979c30623d01a7e3 | 30.447368 | 79 | 0.609205 | 4.298561 | false | false | false | false |
scipy/scipy | scipy/optimize/_shgo_lib/triangulation.py | 10 | 21439 | import numpy as np
import copy
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# e.g., the initial cell is stored in self.H[0]
        # 1st gen new cells are stored in self.H[1] etc.
# When a cell is subgenerated it is removed from this list
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
# TODO: Assign functions to a the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the N-D hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
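    # Hedged standalone illustration of the vertex count mentioned in the
    # docstring above: the triangulated n-cube touches all 2**n corner
    # vertices (plus a centroid in the non-symmetric case). itertools is
    # only used for this sketch.
    # >>> import itertools
    # >>> dim = 3
    # >>> corners = list(itertools.product([0, 1], repeat=dim))
    # >>> len(corners)
    # 8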
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbors and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.HC.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
    def graph_map(self):
        """ Make a list of size 2**n + 1 where each entry is a vertex
        incidence; each list element contains a list of indexes
        corresponding to that entry's neighbors"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
v_o = np.array(origin)
v_s = np.array(supremum)
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple((v_o + v_s) * .5)
# Build new indexed vertex list
V_new = []
for i, v in enumerate(self.C0()[:-1]):
v_x = np.array(v.x)
sub_cell_t1 = v_o - v_o * v_x
sub_cell_t2 = v_s * v_x
vec = sub_cell_t1 + sub_cell_t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
# Append the new cell to the to complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
with symmetric constraints
        The longest edge is tracked by an ordering of the
        vertices in every simplex; the edge between the first and second
        vertex is the longest edge to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect new vertices to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
# Plots
def plot_complex(self):
"""
Here, C is the LIST of simplexes S in the
        2- or 3-D complex.
        To plot a single simplex S in a set C, use e.g., [C[0]].
"""
from matplotlib import pyplot
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class VertexGroup:
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
        # This is the sum of all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
cell and its parent group
"""
        if self.hg_d is None:
            self.hg_d = self.homology_group_rank() - self.p_hgr
        return self.hg_d
def polytopial_sperner_lemma(self):
"""
        Returns the number of stationary points theoretically contained in the
        cell, based on information currently known about the cell.
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super().__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super().__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
# Note Vertex is only initiated once for all x so only
# evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbors"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
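# Illustrative sketch (hypothetical helper, not used by the triangulation
# itself): shows how ``VertexCache`` memoises vertices so the objective is
# evaluated only once per coordinate tuple, and how ``minimiser()`` compares a
# vertex against its connected neighbours.  The objective function, bounds and
# coordinates below are arbitrary example values.
def _vertex_cache_demo():
    def sphere(x_a):
        # toy objective used only for this sketch
        return float(np.sum(x_a ** 2))

    cache = VertexCache(sphere, bounds=[(-1.0, 1.0), (-1.0, 1.0)])
    v_a = cache[(0.5, 0.5)]   # evaluates ``sphere`` once (nfev becomes 1)
    v_b = cache[(0.5, 0.5)]   # same tuple -> served from the cache, no new call
    v_c = cache[(1.0, 1.0)]   # a second, distinct vertex
    v_a.connect(v_c)          # neighbours are what ``minimiser()`` inspects
    return v_a is v_b, cache.nfev, v_a.minimiser()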
| bsd-3-clause | 984ad94cff54d69c795adff1ea23eb09 | 31.434191 | 86 | 0.497131 | 3.771152 | false | false | false | false |
scipy/scipy | scipy/io/matlab/_mio5.py | 12 | 33426 | ''' Classes for read / write of matlab (TM) 5 files
The matfile specification last found here:
https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
(as of December 5 2008)
'''
'''
=================================
Note on functions and mat files
=================================
The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had, therefore, to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.
``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-field of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:
* array flags as for any matlab matrix
* 3 int8 strings
* a matrix
It seems that whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.
When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.
The ``__function_workspace__`` matrix appears to be of double class
(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain arrays
of this same mini-mat format.
I guess that:
* saving an anonymous function back to a mat file will need the
associated ``__function_workspace__`` matrix saved as well for the
anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
involve first pulling off this workspace, appending, checking whether
there were any more anonymous functions appended, and then somehow
merging the relevant workspaces, and saving at the end of the mat
file.
The mat files I was playing with are in ``tests/data``:
* sqr.mat
* parabola.mat
* some_functions.mat
See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging
script I was working with.
'''
# Small fragments of current code adapted from matfile.py by Heiko
# Henkelmann; parts of the code for simplify_cells=True adapted from
# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
import scipy.sparse
from ._byteordercodes import native_code, swapped_code
from ._miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
# Reader object for matlab 5 format variables
from ._mio5_utils import VarReader5
# Constants and helper objects
from ._mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info, mat_struct)
from ._streams import ZlibInputStream
def _has_struct(elem):
"""Determine if elem is an array and if first array item is a struct."""
return (isinstance(elem, np.ndarray) and (elem.size > 0) and
isinstance(elem[0], mat_struct))
def _inspect_cell_array(ndarray):
"""Construct lists from cell arrays (loaded as numpy ndarrays), recursing
into items if they contain mat_struct objects."""
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, mat_struct):
elem_list.append(_matstruct_to_dict(sub_elem))
elif _has_struct(sub_elem):
elem_list.append(_inspect_cell_array(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
def _matstruct_to_dict(matobj):
"""Construct nested dicts from mat_struct objects."""
d = {}
for f in matobj._fieldnames:
elem = matobj.__dict__[f]
if isinstance(elem, mat_struct):
d[f] = _matstruct_to_dict(elem)
elif _has_struct(elem):
d[f] = _inspect_cell_array(elem)
else:
d[f] = elem
return d
def _simplify_cells(d):
"""Convert mat objects in dict to nested dicts."""
for key in d:
if isinstance(d[key], mat_struct):
d[key] = _matstruct_to_dict(d[key])
elif _has_struct(d[key]):
d[key] = _inspect_cell_array(d[key])
return d
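# Illustrative sketch (hypothetical helper): builds a ``mat_struct`` by hand,
# the way the reader produces them when ``struct_as_record=False``, and shows
# the nested-dict conversion performed by the helpers above.  Field names and
# values are arbitrary.
def _simplify_cells_demo():
    inner = mat_struct()
    inner._fieldnames = ['value']
    inner.value = 42
    outer = mat_struct()
    outer._fieldnames = ['name', 'nested']
    outer.name = 'example'
    outer.nested = inner
    # -> {'s': {'name': 'example', 'nested': {'value': 42}}}
    return _simplify_cells({'s': outer})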
class MatFile5Reader(MatFileReader):
''' Reader for Mat 5 mat files
Adds the following attribute to base class
uint16_codec - char codec to use for uint16 char arrays
(defaults to system default codec)
    Uses variable reader that has the following standard interface (see the
    abstract class in ``miobase``)::
__init__(self, file_reader)
read_header(self)
array_from_header(self)
and added interface::
set_stream(self, stream)
read_full_tag(self)
'''
@docfiller
def __init__(self,
mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True,
uint16_codec=None,
simplify_cells=False):
'''Initializer for matlab 5 file format reader
%(matstream_arg)s
%(load_args)s
%(struct_arg)s
uint16_codec : {None, string}
Set codec to use for uint16 char arrays (e.g., 'utf-8').
Use system default codec if None
'''
super().__init__(
mat_stream,
byte_order,
mat_dtype,
squeeze_me,
chars_as_strings,
matlab_compatible,
struct_as_record,
verify_compressed_data_integrity,
simplify_cells)
# Set uint16 codec
if not uint16_codec:
uint16_codec = sys.getdefaultencoding()
self.uint16_codec = uint16_codec
# placeholders for readers - see initialize_read method
self._file_reader = None
self._matrix_reader = None
def guess_byte_order(self):
''' Guess byte order.
Sets stream pointer to 0'''
self.mat_stream.seek(126)
mi = self.mat_stream.read(2)
self.mat_stream.seek(0)
        return '<' if mi == b'IM' else '>'
def read_file_header(self):
''' Read in mat 5 file header '''
hdict = {}
hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
hdr = read_dtype(self.mat_stream, hdr_dtype)
hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
v_major = hdr['version'] >> 8
v_minor = hdr['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict
def initialize_read(self):
''' Run when beginning read of variables
Sets up readers from parameters in `self`
'''
# reader for top level stream. We need this extra top-level
# reader because we use the matrix_reader object to contain
# compressed matrices (so they have their own stream)
self._file_reader = VarReader5(self)
# reader for matrix streams
self._matrix_reader = VarReader5(self)
def read_var_header(self):
''' Read header, return header, next position
Header has to define at least .name and .is_global
Parameters
----------
None
Returns
-------
header : object
object that can be passed to self.read_var_array, and that
has attributes .name and .is_global
next_position : int
position in stream of next variable
'''
mdtype, byte_count = self._file_reader.read_full_tag()
if not byte_count > 0:
raise ValueError("Did not read any bytes")
next_pos = self.mat_stream.tell() + byte_count
if mdtype == miCOMPRESSED:
# Make new stream from compressed data
stream = ZlibInputStream(self.mat_stream, byte_count)
self._matrix_reader.set_stream(stream)
check_stream_limit = self.verify_compressed_data_integrity
mdtype, byte_count = self._matrix_reader.read_full_tag()
else:
check_stream_limit = False
self._matrix_reader.set_stream(self.mat_stream)
if not mdtype == miMATRIX:
raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
header = self._matrix_reader.read_header(check_stream_limit)
return header, next_pos
def read_var_array(self, header, process=True):
''' Read array, given `header`
Parameters
----------
header : header object
object with fields defining variable header
process : {True, False} bool, optional
If True, apply recursive post-processing during loading of
array.
Returns
-------
arr : array
array with post-processing applied or not according to
`process`.
'''
return self._matrix_reader.array_from_header(header, process)
def get_variables(self, variable_names=None):
''' get variables from stream as dictionary
variable_names - optional list of variable names to get
If variable_names is None, then get all variables in file
'''
if isinstance(variable_names, str):
variable_names = [variable_names]
elif variable_names is not None:
variable_names = list(variable_names)
self.mat_stream.seek(0)
# Here we pass all the parameters in self to the reading objects
self.initialize_read()
mdict = self.read_file_header()
mdict['__globals__'] = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = 'None' if hdr.name is None else hdr.name.decode('latin1')
if name in mdict:
warnings.warn('Duplicate variable name "%s" in stream'
' - replacing previous with new\n'
'Consider mio5.varmats_from_mat to split '
'file into single variable files' % name,
MatReadWarning, stacklevel=2)
if name == '':
# can only be a matlab 7 function workspace
name = '__function_workspace__'
# We want to keep this raw because mat_dtype processing
# will break the format (uint8 as mxDOUBLE_CLASS)
process = False
else:
process = True
if variable_names is not None and name not in variable_names:
self.mat_stream.seek(next_position)
continue
try:
res = self.read_var_array(hdr, process)
except MatReadError as err:
warnings.warn(
'Unreadable variable "%s", because "%s"' %
(name, err),
Warning, stacklevel=2)
res = "Read error: %s" % err
self.mat_stream.seek(next_position)
mdict[name] = res
if hdr.is_global:
mdict['__globals__'].append(name)
if variable_names is not None:
variable_names.remove(name)
if len(variable_names) == 0:
break
if self.simplify_cells:
return _simplify_cells(mdict)
else:
return mdict
def list_variables(self):
''' list variables from stream '''
self.mat_stream.seek(0)
# Here we pass all the parameters in self to the reading objects
self.initialize_read()
self.read_file_header()
vars = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = 'None' if hdr.name is None else hdr.name.decode('latin1')
if name == '':
# can only be a matlab 7 function workspace
name = '__function_workspace__'
shape = self._matrix_reader.shape_from_header(hdr)
if hdr.is_logical:
info = 'logical'
else:
info = mclass_info.get(hdr.mclass, 'unknown')
vars.append((name, shape, info))
self.mat_stream.seek(next_position)
return vars
def varmats_from_mat(file_obj):
""" Pull variables out of mat 5 file as a sequence of mat file objects
This can be useful with a difficult mat file, containing unreadable
variables. This routine pulls the variables out in raw form and puts them,
unread, back into a file stream for saving or reading. Another use is the
pathological case where there is more than one variable of the same name in
the file; this routine returns the duplicates, whereas the standard reader
will overwrite duplicates in the returned dictionary.
The file pointer in `file_obj` will be undefined. File pointers for the
returned file-like objects are set at 0.
Parameters
----------
file_obj : file-like
file object containing mat file
Returns
-------
named_mats : list
list contains tuples of (name, BytesIO) where BytesIO is a file-like
object containing mat file contents as for a single variable. The
BytesIO contains a string with the original header and a single var. If
``var_file_obj`` is an individual BytesIO instance, then save as a mat
file with something like ``open('test.mat',
'wb').write(var_file_obj.read())``
Examples
--------
    >>> import numpy as np
    >>> from io import BytesIO
    >>> import scipy.io
>>> mat_fileobj = BytesIO()
>>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
>>> varmats = varmats_from_mat(mat_fileobj)
>>> sorted([name for name, str_obj in varmats])
['a', 'b']
"""
rdr = MatFile5Reader(file_obj)
file_obj.seek(0)
# Raw read of top-level file header
hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
raw_hdr = file_obj.read(hdr_len)
# Initialize variable reading
file_obj.seek(0)
rdr.initialize_read()
rdr.read_file_header()
next_position = file_obj.tell()
named_mats = []
while not rdr.end_of_stream():
start_position = next_position
hdr, next_position = rdr.read_var_header()
name = 'None' if hdr.name is None else hdr.name.decode('latin1')
# Read raw variable string
file_obj.seek(start_position)
byte_count = next_position - start_position
var_str = file_obj.read(byte_count)
# write to stringio object
out_obj = BytesIO()
out_obj.write(raw_hdr)
out_obj.write(var_str)
out_obj.seek(0)
named_mats.append((name, out_obj))
return named_mats
class EmptyStructMarker:
""" Class to indicate presence of empty matlab struct on output """
def to_writeable(source):
''' Convert input object ``source`` to something we can write
Parameters
----------
source : object
Returns
-------
arr : None or ndarray or EmptyStructMarker
If `source` cannot be converted to something we can write to a matfile,
return None. If `source` is equivalent to an empty dictionary, return
``EmptyStructMarker``. Otherwise return `source` converted to an
ndarray with contents for writing to matfile.
'''
if isinstance(source, np.ndarray):
return source
if source is None:
return None
# Objects that implement mappings
is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
hasattr(source, 'items'))
# Objects that don't implement mappings, but do have dicts
if isinstance(source, np.generic):
# NumPy scalars are never mappings (PyPy issue workaround)
pass
elif not is_mapping and hasattr(source, '__dict__'):
source = dict((key, value) for key, value in source.__dict__.items()
if not key.startswith('_'))
is_mapping = True
if is_mapping:
dtype = []
values = []
for field, value in source.items():
if (isinstance(field, str) and
field[0] not in '_0123456789'):
dtype.append((str(field), object))
values.append(value)
if dtype:
return np.array([tuple(values)], dtype)
else:
return EmptyStructMarker
# Next try and convert to an array
narr = np.asanyarray(source)
if narr.dtype.type in (object, np.object_) and \
narr.shape == () and narr == source:
# No interesting conversion possible
return None
return narr
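# Illustrative sketch (hypothetical helper) of the conversion rules above: a
# non-empty mapping becomes a 1-element structured array, an empty mapping is
# flagged with ``EmptyStructMarker`` and ``None`` stays ``None``.  The input
# values are arbitrary.
def _to_writeable_demo():
    as_struct = to_writeable({'a': 1, 'b': 'text'})   # structured ndarray
    empty = to_writeable({})                           # EmptyStructMarker
    nothing = to_writeable(None)                       # None
    return as_struct.dtype.names, empty, nothing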
# Native byte ordered dtypes for convenience for writers
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']
class VarWriter5:
''' Generic matlab matrix writing class '''
mat_tag = np.zeros((), NDT_TAG_FULL)
mat_tag['mdtype'] = miMATRIX
def __init__(self, file_writer):
self.file_stream = file_writer.file_stream
self.unicode_strings = file_writer.unicode_strings
self.long_field_names = file_writer.long_field_names
self.oned_as = file_writer.oned_as
# These are used for top level writes, and unset after
self._var_name = None
self._var_is_global = False
def write_bytes(self, arr):
self.file_stream.write(arr.tobytes(order='F'))
def write_string(self, s):
self.file_stream.write(s)
def write_element(self, arr, mdtype=None):
''' write tag and data '''
if mdtype is None:
mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
# Array needs to be in native byte order
if arr.dtype.byteorder == swapped_code:
arr = arr.byteswap().newbyteorder()
byte_count = arr.size*arr.itemsize
if byte_count <= 4:
self.write_smalldata_element(arr, mdtype, byte_count)
else:
self.write_regular_element(arr, mdtype, byte_count)
def write_smalldata_element(self, arr, mdtype, byte_count):
# write tag with embedded data
tag = np.zeros((), NDT_TAG_SMALL)
tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
        # if arr.tobytes() yields fewer than 4 bytes, the tag's data field is
        # zero-padded as needed.
tag['data'] = arr.tobytes(order='F')
self.write_bytes(tag)
def write_regular_element(self, arr, mdtype, byte_count):
# write tag, data
tag = np.zeros((), NDT_TAG_FULL)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
self.write_bytes(tag)
self.write_bytes(arr)
# pad to next 64-bit boundary
bc_mod_8 = byte_count % 8
if bc_mod_8:
self.file_stream.write(b'\x00' * (8-bc_mod_8))
def write_header(self,
shape,
mclass,
is_complex=False,
is_logical=False,
nzmax=0):
''' Write header for given data options
shape : sequence
array shape
mclass - mat5 matrix class
is_complex - True if matrix is complex
is_logical - True if matrix is logical
nzmax - max non zero elements for sparse arrays
We get the name and the global flag from the object, and reset
them to defaults after we've used them
'''
# get name and is_global from one-shot object store
name = self._var_name
is_global = self._var_is_global
# initialize the top-level matrix tag, store position
self._mat_tag_pos = self.file_stream.tell()
self.write_bytes(self.mat_tag)
# write array flags (complex, global, logical, class, nzmax)
af = np.zeros((), NDT_ARRAY_FLAGS)
af['data_type'] = miUINT32
af['byte_count'] = 8
flags = is_complex << 3 | is_global << 2 | is_logical << 1
af['flags_class'] = mclass | flags << 8
af['nzmax'] = nzmax
self.write_bytes(af)
# shape
self.write_element(np.array(shape, dtype='i4'))
# write name
name = np.asarray(name)
if name == '': # empty string zero-terminated
self.write_smalldata_element(name, miINT8, 0)
else:
self.write_element(name, miINT8)
# reset the one-shot store to defaults
self._var_name = ''
self._var_is_global = False
def update_matrix_tag(self, start_pos):
curr_pos = self.file_stream.tell()
self.file_stream.seek(start_pos)
byte_count = curr_pos - start_pos - 8
if byte_count >= 2**32:
raise MatWriteError("Matrix too large to save with Matlab "
"5 format")
self.mat_tag['byte_count'] = byte_count
self.write_bytes(self.mat_tag)
self.file_stream.seek(curr_pos)
def write_top(self, arr, name, is_global):
""" Write variable at top level of mat file
Parameters
----------
arr : array_like
array-like object to create writer for
name : str, optional
name as it will appear in matlab workspace
default is empty string
is_global : {False, True}, optional
whether variable will be global on load into matlab
"""
# these are set before the top-level header write, and unset at
# the end of the same write, because they do not apply for lower levels
self._var_is_global = is_global
self._var_name = name
# write the header and data
self.write(arr)
def write(self, arr):
''' Write `arr` to stream at top and sub levels
Parameters
----------
arr : array_like
array-like object to create writer for
'''
# store position, so we can update the matrix tag
mat_tag_pos = self.file_stream.tell()
# First check if these are sparse
if scipy.sparse.issparse(arr):
self.write_sparse(arr)
self.update_matrix_tag(mat_tag_pos)
return
# Try to convert things that aren't arrays
narr = to_writeable(arr)
if narr is None:
raise TypeError('Could not convert %s (type %s) to array'
% (arr, type(arr)))
if isinstance(narr, MatlabObject):
self.write_object(narr)
elif isinstance(narr, MatlabFunction):
raise MatWriteError('Cannot write matlab functions')
elif narr is EmptyStructMarker: # empty struct array
self.write_empty_struct()
elif narr.dtype.fields: # struct array
self.write_struct(narr)
elif narr.dtype.hasobject: # cell array
self.write_cells(narr)
elif narr.dtype.kind in ('U', 'S'):
if self.unicode_strings:
codec = 'UTF8'
else:
codec = 'ascii'
self.write_char(narr, codec)
else:
self.write_numeric(narr)
self.update_matrix_tag(mat_tag_pos)
def write_numeric(self, arr):
imagf = arr.dtype.kind == 'c'
logif = arr.dtype.kind == 'b'
try:
mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
except KeyError:
# No matching matlab type, probably complex256 / float128 / float96
# Cast data to complex128 / float64.
if imagf:
arr = arr.astype('c128')
elif logif:
arr = arr.astype('i1') # Should only contain 0/1
else:
arr = arr.astype('f8')
mclass = mxDOUBLE_CLASS
self.write_header(matdims(arr, self.oned_as),
mclass,
is_complex=imagf,
is_logical=logif)
if imagf:
self.write_element(arr.real)
self.write_element(arr.imag)
else:
self.write_element(arr)
def write_char(self, arr, codec='ascii'):
''' Write string array `arr` with given `codec`
'''
if arr.size == 0 or np.all(arr == ''):
# This an empty string array or a string array containing
# only empty strings. Matlab cannot distinguish between a
# string array that is empty, and a string array containing
# only empty strings, because it stores strings as arrays of
# char. There is no way of having an array of char that is
# not empty, but contains an empty string. We have to
# special-case the array-with-empty-strings because even
# empty strings have zero padding, which would otherwise
# appear in matlab as a string with a space.
shape = (0,) * np.max([arr.ndim, 2])
self.write_header(shape, mxCHAR_CLASS)
self.write_smalldata_element(arr, miUTF8, 0)
return
# non-empty string.
#
# Convert to char array
arr = arr_to_chars(arr)
        # We have to write the shape directly, because we are going to
# recode the characters, and the resulting stream of chars
# may have a different length
shape = arr.shape
self.write_header(shape, mxCHAR_CLASS)
if arr.dtype.kind == 'U' and arr.size:
# Make one long string from all the characters. We need to
# transpose here, because we're flattening the array, before
# we write the bytes. The bytes have to be written in
# Fortran order.
n_chars = np.prod(shape)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr.T.copy()) # Fortran order
# Recode with codec to give byte string
st = st_arr.item().encode(codec)
# Reconstruct as 1-D byte array
arr = np.ndarray(shape=(len(st),),
dtype='S1',
buffer=st)
self.write_element(arr, mdtype=miUTF8)
def write_sparse(self, arr):
''' Sparse matrices are 2D
'''
A = arr.tocsc() # convert to sparse CSC format
A.sort_indices() # MATLAB expects sorted row indices
is_complex = (A.dtype.kind == 'c')
is_logical = (A.dtype.kind == 'b')
nz = A.nnz
self.write_header(matdims(arr, self.oned_as),
mxSPARSE_CLASS,
is_complex=is_complex,
is_logical=is_logical,
# matlab won't load file with 0 nzmax
nzmax=1 if nz == 0 else nz)
self.write_element(A.indices.astype('i4'))
self.write_element(A.indptr.astype('i4'))
self.write_element(A.data.real)
if is_complex:
self.write_element(A.data.imag)
def write_cells(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxCELL_CLASS)
# loop over data, column major
A = np.atleast_2d(arr).flatten('F')
for el in A:
self.write(el)
def write_empty_struct(self):
self.write_header((1, 1), mxSTRUCT_CLASS)
# max field name length set to 1 in an example matlab struct
self.write_element(np.array(1, dtype=np.int32))
# Field names element is empty
self.write_element(np.array([], dtype=np.int8))
def write_struct(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxSTRUCT_CLASS)
self._write_items(arr)
def _write_items(self, arr):
# write fieldnames
fieldnames = [f[0] for f in arr.dtype.descr]
length = max([len(fieldname) for fieldname in fieldnames])+1
        max_length = 64 if self.long_field_names else 32
if length > max_length:
raise ValueError("Field names are restricted to %d characters" %
(max_length-1))
self.write_element(np.array([length], dtype='i4'))
self.write_element(
np.array(fieldnames, dtype='S%d' % (length)),
mdtype=miINT8)
A = np.atleast_2d(arr).flatten('F')
for el in A:
for f in fieldnames:
self.write(el[f])
def write_object(self, arr):
'''Same as writing structs, except different mx class, and extra
classname element after header
'''
self.write_header(matdims(arr, self.oned_as),
mxOBJECT_CLASS)
self.write_element(np.array(arr.classname, dtype='S'),
mdtype=miINT8)
self._write_items(arr)
class MatFile5Writer:
''' Class for writing mat5 files '''
@docfiller
def __init__(self, file_stream,
do_compression=False,
unicode_strings=False,
global_vars=None,
long_field_names=False,
oned_as='row'):
''' Initialize writer for matlab 5 format files
Parameters
----------
%(do_compression)s
%(unicode_strings)s
global_vars : None or sequence of strings, optional
Names of variables to be marked as global for matlab
%(long_fields)s
%(oned_as)s
'''
self.file_stream = file_stream
self.do_compression = do_compression
self.unicode_strings = unicode_strings
if global_vars:
self.global_vars = global_vars
else:
self.global_vars = []
self.long_field_names = long_field_names
self.oned_as = oned_as
self._matrix_writer = None
def write_file_header(self):
# write header
hdr = np.zeros((), NDT_FILE_HDR)
hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
% (os.name,time.asctime())
hdr['version'] = 0x0100
hdr['endian_test'] = np.ndarray(shape=(),
dtype='S2',
buffer=np.uint16(0x4d49))
self.file_stream.write(hdr.tobytes())
def put_variables(self, mdict, write_header=None):
''' Write variables in `mdict` to stream
Parameters
----------
mdict : mapping
mapping with method ``items`` returns name, contents pairs where
``name`` which will appear in the matlab workspace in file load, and
``contents`` is something writeable to a matlab file, such as a NumPy
array.
write_header : {None, True, False}, optional
If True, then write the matlab file header before writing the
variables. If None (the default) then write the file header
if we are at position 0 in the stream. By setting False
here, and setting the stream position to the end of the file,
you can append variables to a matlab file
'''
# write header if requested, or None and start of file
if write_header is None:
write_header = self.file_stream.tell() == 0
if write_header:
self.write_file_header()
self._matrix_writer = VarWriter5(self)
for name, var in mdict.items():
if name[0] == '_':
continue
is_global = name in self.global_vars
if self.do_compression:
stream = BytesIO()
self._matrix_writer.file_stream = stream
self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
out_str = zlib.compress(stream.getvalue())
tag = np.empty((), NDT_TAG_FULL)
tag['mdtype'] = miCOMPRESSED
tag['byte_count'] = len(out_str)
self.file_stream.write(tag.tobytes())
self.file_stream.write(out_str)
else: # not compressing
self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
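# Illustrative round-trip sketch (hypothetical helper): write one variable,
# append a second one by seeking to the end and passing ``write_header=False``
# as described in ``put_variables``, then read both back with
# ``MatFile5Reader``.  Variable names and data are arbitrary.
def _mat5_roundtrip_demo():
    stream = BytesIO()
    writer = MatFile5Writer(stream)
    writer.put_variables({'first': np.arange(3.0)})
    # append without rewriting the file header
    stream.seek(0, 2)
    writer.put_variables({'second': np.eye(2)}, write_header=False)
    stream.seek(0)
    reader = MatFile5Reader(stream)
    return reader.get_variables()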
| bsd-3-clause | 4abfb4cbc76cc2bbf9dc412ee75211af | 36.473094 | 84 | 0.581553 | 4.044773 | false | false | false | false |
scipy/scipy | scipy/optimize/tests/test_zeros.py | 15 | 28443 | import pytest
from math import sqrt, exp, sin, cos
from functools import lru_cache
from numpy.testing import (assert_warns, assert_,
assert_allclose,
assert_equal,
assert_array_equal,
suppress_warnings)
import numpy as np
from numpy import finfo, power, nan, isclose
from scipy.optimize import _zeros_py as zeros, newton, root_scalar
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
# Import testing parameters
from scipy.optimize._tstutils import get_tests, functions as tstutils_functions, fstrings as tstutils_fstrings
TOL = 4*np.finfo(float).eps # tolerance
_FLOAT_EPS = finfo(float).eps
# A few test functions used frequently:
# A simple quadratic, (x-1)**2 - 2
def f1(x):
return x ** 2 - 2 * x - 1
def f1_1(x):
return 2 * x - 2
def f1_2(x):
return 2.0 + 0 * x
def f1_and_p_and_pp(x):
return f1(x), f1_1(x), f1_2(x)
# Simple transcendental function
def f2(x):
return exp(x) - cos(x)
def f2_1(x):
return exp(x) + sin(x)
def f2_2(x):
return exp(x) + cos(x)
# lru cached function
@lru_cache()
def f_lrucached(x):
return x
class TestBasic:
def run_check_by_name(self, name, smoothness=0, **kwargs):
a = .5
b = sqrt(3)
xtol = 4*np.finfo(float).eps
rtol = 4*np.finfo(float).eps
for function, fname in zip(tstutils_functions, tstutils_fstrings):
if smoothness > 0 and fname in ['f4', 'f5', 'f6']:
continue
r = root_scalar(function, method=name, bracket=[a, b], x0=a,
xtol=xtol, rtol=rtol, **kwargs)
zero = r.root
assert_(r.converged)
assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
err_msg='method %s, function %s' % (name, fname))
def run_check(self, method, name):
a = .5
b = sqrt(3)
xtol = 4 * _FLOAT_EPS
rtol = 4 * _FLOAT_EPS
for function, fname in zip(tstutils_functions, tstutils_fstrings):
zero, r = method(function, a, b, xtol=xtol, rtol=rtol,
full_output=True)
assert_(r.converged)
assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
err_msg='method %s, function %s' % (name, fname))
def run_check_lru_cached(self, method, name):
# check that https://github.com/scipy/scipy/issues/10846 is fixed
a = -1
b = 1
zero, r = method(f_lrucached, a, b, full_output=True)
assert_(r.converged)
assert_allclose(zero, 0,
err_msg='method %s, function %s' % (name, 'f_lrucached'))
def _run_one_test(self, tc, method, sig_args_keys=None,
sig_kwargs_keys=None, **kwargs):
method_args = []
for k in sig_args_keys or []:
if k not in tc:
# If a,b not present use x0, x1. Similarly for f and func
k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
method_args.append(tc[k])
method_kwargs = dict(**kwargs)
method_kwargs.update({'full_output': True, 'disp': False})
for k in sig_kwargs_keys or []:
method_kwargs[k] = tc[k]
root = tc.get('root')
func_args = tc.get('args', ())
try:
r, rr = method(*method_args, args=func_args, **method_kwargs)
return root, rr, tc
except Exception:
return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR), tc
def run_tests(self, tests, method, name,
xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
known_fail=None, **kwargs):
r"""Run test-cases using the specified method and the supplied signature.
Extract the arguments for the method call from the test case
dictionary using the supplied keys for the method's signature."""
# The methods have one of two base signatures:
# (f, a, b, **kwargs) # newton
# (func, x0, **kwargs) # bisect/brentq/...
sig = _getfullargspec(method) # FullArgSpec with args, varargs, varkw, defaults, ...
assert_(not sig.kwonlyargs)
nDefaults = len(sig.defaults)
nRequired = len(sig.args) - nDefaults
sig_args_keys = sig.args[:nRequired]
sig_kwargs_keys = []
if name in ['secant', 'newton', 'halley']:
if name in ['newton', 'halley']:
sig_kwargs_keys.append('fprime')
if name in ['halley']:
sig_kwargs_keys.append('fprime2')
kwargs['tol'] = xtol
else:
kwargs['xtol'] = xtol
kwargs['rtol'] = rtol
results = [list(self._run_one_test(
tc, method, sig_args_keys=sig_args_keys,
sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
# results= [[true root, full output, tc], ...]
known_fail = known_fail or []
notcvgd = [elt for elt in results if not elt[1].converged]
notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])
# The usable xtol and rtol depend on the test
tols = {'xtol': 4 * _FLOAT_EPS, 'rtol': 4 * _FLOAT_EPS}
tols.update(**kwargs)
rtol = tols['rtol']
atol = tols.get('tol', tols['xtol'])
cvgd = [elt for elt in results if elt[1].converged]
approx = [elt[1].root for elt in cvgd]
correct = [elt[0] for elt in cvgd]
notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
not isclose(a, c, rtol=rtol, atol=atol)
and elt[-1]['ID'] not in known_fail]
# Evaluate the function and see if is 0 at the purported root
fvs = [tc['f'](aroot, *(tc['args'])) for aroot, c, fullout, tc in notclose]
notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
assert_equal([notclose, len(notclose)], [[], 0])
def run_collection(self, collection, method, name, smoothness=None,
known_fail=None,
xtol=4 * _FLOAT_EPS, rtol=4 * _FLOAT_EPS,
**kwargs):
r"""Run a collection of tests using the specified method.
The name is used to determine some optional arguments."""
tests = get_tests(collection, smoothness=smoothness)
self.run_tests(tests, method, name, xtol=xtol, rtol=rtol,
known_fail=known_fail, **kwargs)
def test_bisect(self):
self.run_check(zeros.bisect, 'bisect')
self.run_check_lru_cached(zeros.bisect, 'bisect')
self.run_check_by_name('bisect')
self.run_collection('aps', zeros.bisect, 'bisect', smoothness=1)
def test_ridder(self):
self.run_check(zeros.ridder, 'ridder')
self.run_check_lru_cached(zeros.ridder, 'ridder')
self.run_check_by_name('ridder')
self.run_collection('aps', zeros.ridder, 'ridder', smoothness=1)
def test_brentq(self):
self.run_check(zeros.brentq, 'brentq')
self.run_check_lru_cached(zeros.brentq, 'brentq')
self.run_check_by_name('brentq')
# Brentq/h needs a lower tolerance to be specified
self.run_collection('aps', zeros.brentq, 'brentq', smoothness=1,
xtol=1e-14, rtol=1e-14)
def test_brenth(self):
self.run_check(zeros.brenth, 'brenth')
self.run_check_lru_cached(zeros.brenth, 'brenth')
self.run_check_by_name('brenth')
self.run_collection('aps', zeros.brenth, 'brenth', smoothness=1,
xtol=1e-14, rtol=1e-14)
def test_toms748(self):
self.run_check(zeros.toms748, 'toms748')
self.run_check_lru_cached(zeros.toms748, 'toms748')
self.run_check_by_name('toms748')
self.run_collection('aps', zeros.toms748, 'toms748', smoothness=1)
def test_newton_collections(self):
known_fail = ['aps.13.00']
known_fail += ['aps.12.05', 'aps.12.17'] # fails under Windows Py27
for collection in ['aps', 'complex']:
self.run_collection(collection, zeros.newton, 'newton',
smoothness=2, known_fail=known_fail)
def test_halley_collections(self):
known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09',
'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13',
'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17',
'aps.12.18', 'aps.13.00']
for collection in ['aps', 'complex']:
self.run_collection(collection, zeros.newton, 'halley',
smoothness=2, known_fail=known_fail)
@staticmethod
def f1(x):
return x**2 - 2*x - 1 # == (x-1)**2 - 2
@staticmethod
def f1_1(x):
return 2*x - 2
@staticmethod
def f1_2(x):
return 2.0 + 0*x
@staticmethod
def f2(x):
return exp(x) - cos(x)
@staticmethod
def f2_1(x):
return exp(x) + sin(x)
@staticmethod
def f2_2(x):
return exp(x) + cos(x)
def test_newton(self):
for f, f_1, f_2 in [(self.f1, self.f1_1, self.f1_2),
(self.f2, self.f2_1, self.f2_2)]:
x = zeros.newton(f, 3, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, x1=5, tol=1e-6) # secant, x0 and x1
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, tol=1e-6) # newton
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6) # halley
assert_allclose(f(x), 0, atol=1e-6)
def test_newton_by_name(self):
r"""Invoke newton through root_scalar()"""
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
r = root_scalar(f, method='newton', x0=3, fprime=f_1, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
def test_secant_by_name(self):
r"""Invoke secant through root_scalar()"""
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
r = root_scalar(f, method='secant', x0=3, x1=2, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
r = root_scalar(f, method='secant', x0=3, x1=5, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
def test_halley_by_name(self):
r"""Invoke halley through root_scalar()"""
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
r = root_scalar(f, method='halley', x0=3,
fprime=f_1, fprime2=f_2, xtol=1e-6)
assert_allclose(f(r.root), 0, atol=1e-6)
def test_root_scalar_fail(self):
with pytest.raises(ValueError):
root_scalar(f1, method='secant', x0=3, xtol=1e-6) # no x1
with pytest.raises(ValueError):
root_scalar(f1, method='newton', x0=3, xtol=1e-6) # no fprime
with pytest.raises(ValueError):
root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6) # no fprime2
with pytest.raises(ValueError):
root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6) # no fprime
def test_array_newton(self):
"""test newton with array"""
def f1(x, *a):
b = a[0] + x * a[3]
return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x
def f1_1(x, *a):
b = a[3] / a[5]
return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1
def f1_2(x, *a):
b = a[3] / a[5]
return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2
a0 = np.array([
5.32725221, 5.48673747, 5.49539973,
5.36387202, 4.80237316, 1.43764452,
5.23063958, 5.46094772, 5.50512718,
5.42046290
])
a1 = (np.sin(range(10)) + 1.0) * 7.0
args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
x0 = [7.0] * 10
x = zeros.newton(f1, x0, f1_1, args)
x_expected = (
6.17264965, 11.7702805, 12.2219954,
7.11017681, 1.18151293, 0.143707955,
4.31928228, 10.5419107, 12.7552490,
8.91225749
)
assert_allclose(x, x_expected)
# test halley's
x = zeros.newton(f1, x0, f1_1, args, fprime2=f1_2)
assert_allclose(x, x_expected)
# test secant
x = zeros.newton(f1, x0, args=args)
assert_allclose(x, x_expected)
def test_array_newton_complex(self):
def f(x):
return x + 1+1j
def fprime(x):
return 1.0
t = np.full(4, 1j)
x = zeros.newton(f, t, fprime=fprime)
assert_allclose(f(x), 0.)
# should work even if x0 is not complex
t = np.ones(4)
x = zeros.newton(f, t, fprime=fprime)
assert_allclose(f(x), 0.)
x = zeros.newton(f, t)
assert_allclose(f(x), 0.)
def test_array_secant_active_zero_der(self):
"""test secant doesn't continue to iterate zero derivatives"""
x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5],
args=[np.array([17, 25])])
assert_allclose(x, (4.123105625617661, 5.0))
def test_array_newton_integers(self):
# test secant with float
x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2,
args=([15.0, 17.0],))
assert_allclose(x, (3.872983346207417, 4.123105625617661))
# test integer becomes float
x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],))
assert_allclose(x, (3.872983346207417, 4.123105625617661))
def test_array_newton_zero_der_failures(self):
# test derivative zero warning
assert_warns(RuntimeWarning, zeros.newton,
lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y)
# test failures and zero_der
with pytest.warns(RuntimeWarning):
results = zeros.newton(lambda y: y**2 - 2, [0., 0.],
lambda y: 2*y, full_output=True)
assert_allclose(results.root, 0)
assert results.zero_der.all()
assert not results.converged.any()
def test_newton_combined(self):
f1 = lambda x: x**2 - 2*x - 1
f1_1 = lambda x: 2*x - 2
f1_2 = lambda x: 2.0 + 0*x
def f1_and_p_and_pp(x):
return x**2 - 2*x-1, 2*x-2, 2.0
sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
assert_allclose(sol0.root, sol.root, atol=1e-8)
assert_equal(2*sol.function_calls, sol0.function_calls)
sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
assert_allclose(sol0.root, sol.root, atol=1e-8)
assert_equal(3*sol.function_calls, sol0.function_calls)
def test_newton_full_output(self):
# Test the full_output capability, both when converging and not.
# Use simple polynomials, to avoid hitting platform dependencies
# (e.g., exp & trig) in number of iterations
x0 = 3
expected_counts = [(6, 7), (5, 10), (3, 9)]
for derivs in range(3):
kwargs = {'tol': 1e-6, 'full_output': True, }
for k, v in [['fprime', self.f1_1], ['fprime2', self.f1_2]][:derivs]:
kwargs[k] = v
x, r = zeros.newton(self.f1, x0, disp=False, **kwargs)
assert_(r.converged)
assert_equal(x, r.root)
assert_equal((r.iterations, r.function_calls), expected_counts[derivs])
if derivs == 0:
assert(r.function_calls <= r.iterations + 1)
else:
assert_equal(r.function_calls, (derivs + 1) * r.iterations)
# Now repeat, allowing one fewer iteration to force convergence failure
iters = r.iterations - 1
x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=False, **kwargs)
assert_(not r.converged)
assert_equal(x, r.root)
assert_equal(r.iterations, iters)
if derivs == 1:
# Check that the correct Exception is raised and
# validate the start of the message.
with pytest.raises(
RuntimeError,
match='Failed to converge after %d iterations, value is .*' % (iters)):
x, r = zeros.newton(self.f1, x0, maxiter=iters, disp=True, **kwargs)
def test_deriv_zero_warning(self):
func = lambda x: x**2 - 2.0
dfunc = lambda x: 2*x
assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc, disp=False)
with pytest.raises(RuntimeError, match='Derivative was zero'):
zeros.newton(func, 0.0, dfunc)
def test_newton_does_not_modify_x0(self):
# https://github.com/scipy/scipy/issues/9964
x0 = np.array([0.1, 3])
x0_copy = x0.copy() # Copy to test for equality.
newton(np.sin, x0, np.cos)
assert_array_equal(x0, x0_copy)
def test_maxiter_int_check(self):
for method in [zeros.bisect, zeros.newton, zeros.ridder, zeros.brentq,
zeros.brenth, zeros.toms748]:
with pytest.raises(TypeError,
match="'float' object cannot be interpreted as an integer"):
method(f1, 0.0, 1.0, maxiter=72.45)
def test_gh_5555():
root = 0.1
def f(x):
return x - root
methods = [zeros.bisect, zeros.ridder]
xtol = rtol = TOL
for method in methods:
res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
assert_allclose(root, res, atol=xtol, rtol=rtol,
err_msg='method %s' % method.__name__)
def test_gh_5557():
# Show that without the changes in 5557 brentq and brenth might
# only achieve a tolerance of 2*(xtol + rtol*|res|).
# f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
# 0.4). The important parts are that |f(0)| < |f(1)| (so that
# brent takes 0 as the initial guess), |f(0)| < atol (so that
# brent accepts 0 as the root), and that the exact root of f lies
# more than atol away from 0 (so that brent doesn't achieve the
# desired tolerance).
def f(x):
if x < 0.5:
return -0.1
else:
return x - 0.6
atol = 0.51
rtol = 4 * _FLOAT_EPS
methods = [zeros.brentq, zeros.brenth]
for method in methods:
res = method(f, 0, 1, xtol=atol, rtol=rtol)
assert_allclose(0.6, res, atol=atol, rtol=rtol)
def test_brent_underflow_in_root_bracketing():
    # Testing if an interval [a,b] brackets a zero of a function
# by checking f(a)*f(b) < 0 is not reliable when the product
# underflows/overflows. (reported in issue# 13737)
underflow_scenario = (-450.0, -350.0, -400.0)
overflow_scenario = (350.0, 450.0, 400.0)
for a, b, root in [underflow_scenario, overflow_scenario]:
c = np.exp(root)
for method in [zeros.brenth, zeros.brentq]:
res = method(lambda x: np.exp(x)-c, a, b)
assert_allclose(root, res)
class TestRootResults:
def test_repr(self):
r = zeros.RootResults(root=1.0,
iterations=44,
function_calls=46,
flag=0)
expected_repr = (" converged: True\n flag: 'converged'"
"\n function_calls: 46\n iterations: 44\n"
" root: 1.0")
assert_equal(repr(r), expected_repr)
def test_complex_halley():
"""Test Halley's works with complex roots"""
def f(x, *a):
return a[0] * x**2 + a[1] * x + a[2]
def f_1(x, *a):
return 2 * a[0] * x + a[1]
def f_2(x, *a):
retval = 2 * a[0]
try:
size = len(x)
except TypeError:
return retval
else:
return [retval] * size
z = complex(1.0, 2.0)
coeffs = (2.0, 3.0, 4.0)
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
# (-0.75000000000000078+1.1989578808281789j)
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
z = [z] * 10
coeffs = (2.0, 3.0, 4.0)
y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
assert_allclose(f(y, *coeffs), 0, atol=1e-6)
def test_zero_der_nz_dp():
"""Test secant method with a non-zero dp, but an infinite newton step"""
    # Pick a symmetric function and choose a point that, together with dx,
    # makes a secant that is a flat line with zero slope, e.g. f = (x - 100)**2,
    # which has a root at x = 100 and is symmetric around the line x = 100.
    # We have to pick a really big number so that this holds consistently.
    # Now find a point on each side so that the secant has a zero slope.
dx = np.finfo(float).eps ** 0.33
# 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100
# -> 200 = p0 * (2 + dx) + dx
p0 = (200.0 - dx) / (2.0 + dx)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "RMS of")
x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
assert_allclose(x, [100] * 10)
# test scalar cases too
p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Tolerance of")
x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False)
assert_allclose(x, 1)
with pytest.raises(RuntimeError, match='Tolerance of'):
x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True)
p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Tolerance of")
x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False)
assert_allclose(x, -1)
with pytest.raises(RuntimeError, match='Tolerance of'):
x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True)
def test_array_newton_failures():
"""Test that array newton fails as expected"""
# p = 0.68 # [MPa]
# dp = -0.068 * 1e6 # [Pa]
# T = 323 # [K]
diameter = 0.10 # [m]
# L = 100 # [m]
roughness = 0.00015 # [m]
rho = 988.1 # [kg/m**3]
mu = 5.4790e-04 # [Pa*s]
u = 2.488 # [m/s]
reynolds_number = rho * u * diameter / mu # Reynolds number
def colebrook_eqn(darcy_friction, re, dia):
return (1 / np.sqrt(darcy_friction) +
2 * np.log10(roughness / 3.7 / dia +
2.51 / re / np.sqrt(darcy_friction)))
# only some failures
with pytest.warns(RuntimeWarning):
result = zeros.newton(
colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2,
args=[reynolds_number, diameter], full_output=True
)
assert not result.converged.all()
# they all fail
with pytest.raises(RuntimeError):
result = zeros.newton(
colebrook_eqn, x0=[0.01] * 2, maxiter=2,
args=[reynolds_number, diameter], full_output=True
)
# this test should **not** raise a RuntimeWarning
def test_gh8904_zeroder_at_root_fails():
"""Test that Newton or Halley don't warn if zero derivative at root"""
    # a function that has a zero derivative at its root
def f_zeroder_root(x):
return x**3 - x**2
# should work with secant
r = zeros.newton(f_zeroder_root, x0=0)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# test again with array
r = zeros.newton(f_zeroder_root, x0=[0]*10)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# 1st derivative
def fder(x):
return 3 * x**2 - 2 * x
# 2nd derivative
def fder2(x):
return 6*x - 2
# should work with newton and halley
r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
fprime2=fder2)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# test again with array
r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
fprime2=fder2)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# also test that if a root is found we do not raise RuntimeWarning even if
# the derivative is zero, EG: at x = 0.5, then fval = -0.125 and
# fder = -0.25 so the next guess is 0.5 - (-0.125/-0.5) = 0 which is the
# root, but if the solver continued with that guess, then it will calculate
# a zero derivative, so it should return the root w/o RuntimeWarning
r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# test again with array
r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder)
assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
# doesn't apply to halley
def test_gh_8881():
r"""Test that Halley's method realizes that the 2nd order adjustment
is too big and drops off to the 1st order adjustment."""
n = 9
def f(x):
return power(x, 1.0/n) - power(n, 1.0/n)
def fp(x):
return power(x, (1.0-n)/n)/n
def fpp(x):
return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n
x0 = 0.1
# The root is at x=9.
# The function has positive slope, x0 < root.
# Newton succeeds in 8 iterations
rt, r = newton(f, x0, fprime=fp, full_output=True)
assert(r.converged)
# Before the Issue 8881/PR 8882, halley would send x in the wrong direction.
# Check that it now succeeds.
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
assert(r.converged)
def test_gh_9608_preserve_array_shape():
"""
Test that shape is preserved for array inputs even if fprime or fprime2 is
scalar
"""
def f(x):
return x**2
def fp(x):
return 2 * x
def fpp(x):
return 2
x0 = np.array([-2], dtype=np.float32)
rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
assert(r.converged)
x0_array = np.array([-2, -3], dtype=np.float32)
# This next invocation should fail
with pytest.raises(IndexError):
result = zeros.newton(
f, x0_array, fprime=fp, fprime2=fpp, full_output=True
)
def fpp_array(x):
return np.full(np.shape(x), 2, dtype=np.float32)
result = zeros.newton(
f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True
)
assert result.converged.all()
@pytest.mark.parametrize(
"maximum_iterations,flag_expected",
[(10, zeros.CONVERR), (100, zeros.CONVERGED)])
def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected):
"""
Test that if the maximum iterations is exceeded that the flag is not
converged.
"""
result = zeros.brentq(
lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5,
-30, 30, (), 1e-6, 1e-6, maximum_iterations,
full_output=True, disp=False)
assert result[1].flag == flag_expected
if flag_expected == zeros.CONVERR:
# didn't converge because exceeded maximum iterations
assert result[1].iterations == maximum_iterations
elif flag_expected == zeros.CONVERGED:
# converged before maximum iterations
assert result[1].iterations < maximum_iterations
def test_gh9551_raise_error_if_disp_true():
"""Test that if disp is true then zero derivative raises RuntimeError"""
def f(x):
return x*x + 1
def f_p(x):
return 2*x
assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False)
with pytest.raises(
RuntimeError,
match=r'^Derivative was zero\. Failed to converge after \d+ iterations, value is [+-]?\d*\.\d+\.$'):
zeros.newton(f, 1.0, f_p)
root = zeros.newton(f, complex(10.0, 10.0), f_p)
assert_allclose(root, complex(0.0, 1.0))
| bsd-3-clause | 9a051f9084d151c45cf610e8a61b4138 | 35.938961 | 112 | 0.557149 | 3.05379 | false | true | false | false |
scipy/scipy | scipy/stats/_levy_stable/__init__.py | 8 | 43857 | # -*- coding: utf-8 -*-
#
import warnings
from functools import partial
import numpy as np
from scipy import optimize
from scipy import integrate
from scipy.integrate._quadrature import _builtincoeffs
from scipy import interpolate
from scipy.interpolate import RectBivariateSpline
import scipy.special as sc
from scipy._lib._util import _lazywhere
from .._distn_infrastructure import rv_continuous, _ShapeInfo
from .._continuous_distns import uniform, expon, _norm_pdf, _norm_cdf
from .levyst import Nolan
from scipy._lib.doccer import inherit_docstring_from
__all__ = ["levy_stable", "levy_stable_gen", "pdf_from_cf_with_fft"]
# Stable distributions are known for various parameterisations
# some being advantageous for numerical considerations and others
# useful due to their location/scale awareness.
#
# Here we follow [NO] convention (see the references in the docstring
# for levy_stable_gen below).
#
# S0 / Z0 / x0 (aka Zolotarev's M)
# S1 / Z1 / x1
#
# Where S* denotes parameterisation, Z* denotes standardized
# version where gamma = 1, delta = 0 and x* denotes variable.
#
# Scipy's original Stable was a random variate generator. It
# uses S1 and is unfortunately not location/scale aware.
# default numerical integration tolerance
# used for epsrel in piecewise and both epsrel and epsabs in dni
# (epsabs needed in dni since weighted quad requires epsabs > 0)
_QUAD_EPS = 1.2e-14
def _Phi_Z0(alpha, t):
return (
-np.tan(np.pi * alpha / 2) * (np.abs(t) ** (1 - alpha) - 1)
if alpha != 1
else -2.0 * np.log(np.abs(t)) / np.pi
)
def _Phi_Z1(alpha, t):
return (
np.tan(np.pi * alpha / 2)
if alpha != 1
else -2.0 * np.log(np.abs(t)) / np.pi
)
def _cf(Phi, t, alpha, beta):
"""Characteristic function."""
return np.exp(
-(np.abs(t) ** alpha) * (1 - 1j * beta * np.sign(t) * Phi(alpha, t))
)
_cf_Z0 = partial(_cf, _Phi_Z0)
_cf_Z1 = partial(_cf, _Phi_Z1)
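# A small illustrative check (comment only, not executed; values are
# approximate): for the Cauchy case alpha=1, beta=0 the beta-dependent term
# vanishes and both parameterizations reduce to exp(-|t|), e.g.
#
#     _cf_Z1(1.0, 1.0, 0.0)   # ~ exp(-1) ~ 0.3679 + 0j
#
# Fourier inversion of exp(-|t|) gives the Cauchy density
# 1 / (pi * (1 + x**2)), which reappears below as an analytic special case.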
def _pdf_single_value_cf_integrate(Phi, x, alpha, beta, **kwds):
"""To improve DNI accuracy convert characteristic function in to real
valued integral using Euler's formula, then exploit cosine symmetry to
change limits to [0, inf). Finally use cosine addition formula to split
into two parts that can be handled by weighted quad pack.
"""
quad_eps = kwds.get("quad_eps", _QUAD_EPS)
def integrand1(t):
if t == 0:
return 0
return np.exp(-(t ** alpha)) * (
np.cos(beta * (t ** alpha) * Phi(alpha, t))
)
def integrand2(t):
if t == 0:
return 0
return np.exp(-(t ** alpha)) * (
np.sin(beta * (t ** alpha) * Phi(alpha, t))
)
with np.errstate(invalid="ignore"):
int1, *ret1 = integrate.quad(
integrand1,
0,
np.inf,
weight="cos",
wvar=x,
limit=1000,
epsabs=quad_eps,
epsrel=quad_eps,
full_output=1,
)
int2, *ret2 = integrate.quad(
integrand2,
0,
np.inf,
weight="sin",
wvar=x,
limit=1000,
epsabs=quad_eps,
epsrel=quad_eps,
full_output=1,
)
return (int1 + int2) / np.pi
_pdf_single_value_cf_integrate_Z0 = partial(
_pdf_single_value_cf_integrate, _Phi_Z0
)
_pdf_single_value_cf_integrate_Z1 = partial(
_pdf_single_value_cf_integrate, _Phi_Z1
)
def _nolan_round_difficult_input(
x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
):
"""Round difficult input values for Nolan's method in [NO]."""
# following Nolan's STABLE,
# "1. When 0 < |alpha-1| < 0.005, the program has numerical problems
# evaluating the pdf and cdf. The current version of the program sets
# alpha=1 in these cases. This approximation is not bad in the S0
# parameterization."
if np.abs(alpha - 1) < alpha_tol_near_one:
alpha = 1.0
# "2. When alpha=1 and |beta| < 0.005, the program has numerical
# problems. The current version sets beta=0."
# We seem to have addressed this through re-expression of g(theta) here
# "8. When |x0-beta*tan(pi*alpha/2)| is small, the
# computations of the density and cumulative have numerical problems.
# The program works around this by setting
# z = beta*tan(pi*alpha/2) when
# |z-beta*tan(pi*alpha/2)| < tol(5)*alpha**(1/alpha).
# (The bound on the right is ad hoc, to get reasonable behavior
# when alpha is small)."
# where tol(5) = 0.5e-2 by default.
#
# We seem to have partially addressed this through re-expression of
# g(theta) here, but it still needs to be used in some extreme cases.
# Perhaps tol(5) = 0.5e-2 could be reduced for our implementation.
if np.abs(x0 - zeta) < x_tol_near_zeta * alpha ** (1 / alpha):
x0 = zeta
return x0, alpha, beta
def _pdf_single_value_piecewise_Z1(x, alpha, beta, **kwds):
    # convert from Nolan's S_1 (aka S) to S_0 (aka Zolotarev M)
# parameterization
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0 = x + zeta if alpha != 1 else x
return _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds)
def _pdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds):
quad_eps = kwds.get("quad_eps", _QUAD_EPS)
x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005)
alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005)
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0, alpha, beta = _nolan_round_difficult_input(
x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
)
# some other known distribution pdfs / analytical cases
# TODO: add more where possible with test coverage,
# eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases
if alpha == 2.0:
# normal
return _norm_pdf(x0 / np.sqrt(2)) / np.sqrt(2)
elif alpha == 0.5 and beta == 1.0:
# levy
# since S(1/2, 1, gamma, delta; <x>) ==
# S(1/2, 1, gamma, gamma + delta; <x0>).
_x = x0 + 1
if _x <= 0:
return 0
return 1 / np.sqrt(2 * np.pi * _x) / _x * np.exp(-1 / (2 * _x))
elif alpha == 0.5 and beta == 0.0 and x0 != 0:
# analytical solution [HO]
S, C = sc.fresnel([1 / np.sqrt(2 * np.pi * np.abs(x0))])
arg = 1 / (4 * np.abs(x0))
return (
np.sin(arg) * (0.5 - S[0]) + np.cos(arg) * (0.5 - C[0])
) / np.sqrt(2 * np.pi * np.abs(x0) ** 3)
elif alpha == 1.0 and beta == 0.0:
# cauchy
return 1 / (1 + x0 ** 2) / np.pi
return _pdf_single_value_piecewise_post_rounding_Z0(
x0, alpha, beta, quad_eps
)
def _pdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps):
"""Calculate pdf using Nolan's methods as detailed in [NO].
"""
_nolan = Nolan(alpha, beta, x0)
zeta = _nolan.zeta
xi = _nolan.xi
c2 = _nolan.c2
g = _nolan.g
# handle Nolan's initial case logic
if x0 == zeta:
return (
sc.gamma(1 + 1 / alpha)
* np.cos(xi)
/ np.pi
/ ((1 + zeta ** 2) ** (1 / alpha / 2))
)
elif x0 < zeta:
return _pdf_single_value_piecewise_post_rounding_Z0(
-x0, alpha, -beta, quad_eps
)
# following Nolan, we may now assume
# x0 > zeta when alpha != 1
# beta != 0 when alpha == 1
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014):
return 0.0
def integrand(theta):
# limit any numerical issues leading to g_1 < 0 near theta limits
g_1 = g(theta)
if not np.isfinite(g_1) or g_1 < 0:
g_1 = 0
return g_1 * np.exp(-g_1)
with np.errstate(all="ignore"):
peak = optimize.bisect(
lambda t: g(t) - 1, -xi, np.pi / 2, xtol=quad_eps
)
# this integrand can be very peaked, so we need to force
# QUADPACK to evaluate the function inside its support
#
# lastly, we add additional samples at
# ~exp(-100), ~exp(-10), ~exp(-5), ~exp(-1)
# to improve QUADPACK's detection of rapidly descending tail behavior
# (this choice is fairly ad hoc)
tail_points = [
optimize.bisect(lambda t: g(t) - exp_height, -xi, np.pi / 2)
for exp_height in [100, 10, 5]
# exp_height = 1 is handled by peak
]
intg_points = [0, peak] + tail_points
intg, *ret = integrate.quad(
integrand,
-xi,
np.pi / 2,
points=intg_points,
limit=100,
epsrel=quad_eps,
epsabs=0,
full_output=1,
)
return c2 * intg
def _cdf_single_value_piecewise_Z1(x, alpha, beta, **kwds):
    # convert from Nolan's S_1 (aka S) to S_0 (aka Zolotarev M)
# parameterization
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0 = x + zeta if alpha != 1 else x
return _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds)
def _cdf_single_value_piecewise_Z0(x0, alpha, beta, **kwds):
quad_eps = kwds.get("quad_eps", _QUAD_EPS)
x_tol_near_zeta = kwds.get("piecewise_x_tol_near_zeta", 0.005)
alpha_tol_near_one = kwds.get("piecewise_alpha_tol_near_one", 0.005)
zeta = -beta * np.tan(np.pi * alpha / 2.0)
x0, alpha, beta = _nolan_round_difficult_input(
x0, alpha, beta, zeta, x_tol_near_zeta, alpha_tol_near_one
)
# some other known distribution cdfs / analytical cases
# TODO: add more where possible with test coverage,
# eg https://en.wikipedia.org/wiki/Stable_distribution#Other_analytic_cases
if alpha == 2.0:
# normal
return _norm_cdf(x0 / np.sqrt(2))
elif alpha == 0.5 and beta == 1.0:
# levy
# since S(1/2, 1, gamma, delta; <x>) ==
# S(1/2, 1, gamma, gamma + delta; <x0>).
_x = x0 + 1
if _x <= 0:
return 0
return sc.erfc(np.sqrt(0.5 / _x))
elif alpha == 1.0 and beta == 0.0:
# cauchy
return 0.5 + np.arctan(x0) / np.pi
return _cdf_single_value_piecewise_post_rounding_Z0(
x0, alpha, beta, quad_eps
)
def _cdf_single_value_piecewise_post_rounding_Z0(x0, alpha, beta, quad_eps):
"""Calculate cdf using Nolan's methods as detailed in [NO].
"""
_nolan = Nolan(alpha, beta, x0)
zeta = _nolan.zeta
xi = _nolan.xi
c1 = _nolan.c1
# c2 = _nolan.c2
c3 = _nolan.c3
g = _nolan.g
# handle Nolan's initial case logic
if (alpha == 1 and beta < 0) or x0 < zeta:
# NOTE: Nolan's paper has a typo here!
# He states F(x) = 1 - F(x, alpha, -beta), but this is clearly
# incorrect since F(-infty) would be 1.0 in this case
# Indeed, the alpha != 1, x0 < zeta case is correct here.
return 1 - _cdf_single_value_piecewise_post_rounding_Z0(
-x0, alpha, -beta, quad_eps
)
elif x0 == zeta:
return 0.5 - xi / np.pi
# following Nolan, we may now assume
# x0 > zeta when alpha != 1
# beta > 0 when alpha == 1
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi / 2, rtol=1e-014, atol=1e-014):
return c1
def integrand(theta):
g_1 = g(theta)
return np.exp(-g_1)
with np.errstate(all="ignore"):
# shrink supports where required
left_support = -xi
right_support = np.pi / 2
if alpha > 1:
# integrand(t) monotonic 0 to 1
if integrand(-xi) != 0.0:
res = optimize.minimize(
integrand,
(-xi,),
method="L-BFGS-B",
bounds=[(-xi, np.pi / 2)],
)
left_support = res.x[0]
else:
# integrand(t) monotonic 1 to 0
if integrand(np.pi / 2) != 0.0:
res = optimize.minimize(
integrand,
(np.pi / 2,),
method="L-BFGS-B",
bounds=[(-xi, np.pi / 2)],
)
right_support = res.x[0]
intg, *ret = integrate.quad(
integrand,
left_support,
right_support,
points=[left_support, right_support],
limit=100,
epsrel=quad_eps,
epsabs=0,
full_output=1,
)
return c1 + c3 * intg
def _rvs_Z1(alpha, beta, size=None, random_state=None):
"""Simulate random variables using Nolan's methods as detailed in [NO].
"""
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (
2
/ np.pi
* (
(np.pi / 2 + bTH) * tanTH
- beta * np.log((np.pi / 2 * W * cosTH) / (np.pi / 2 + bTH))
)
)
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (
W
/ (cosTH / np.tan(aTH) + np.sin(TH))
* ((np.cos(aTH) + np.sin(aTH) * tanTH) / W) ** (1.0 / alpha)
)
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta * np.tan(np.pi * alpha / 2)
th0 = np.arctan(val0) / alpha
val3 = W / (cosTH / np.tan(alpha * (th0 + TH)) + np.sin(TH))
res3 = val3 * (
(
np.cos(aTH)
+ np.sin(aTH) * tanTH
- val0 * (np.sin(aTH) - np.cos(aTH) * tanTH)
)
/ W
) ** (1.0 / alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(
beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func,
f2=otherwise,
)
return res
alpha = np.broadcast_to(alpha, size)
beta = np.broadcast_to(beta, size)
TH = uniform.rvs(
loc=-np.pi / 2.0, scale=np.pi, size=size, random_state=random_state
)
W = expon.rvs(size=size, random_state=random_state)
aTH = alpha * TH
bTH = beta * TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(
alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func,
f2=alphanot1func,
)
return res
def _fitstart_S0(data):
alpha, beta, delta1, gamma = _fitstart_S1(data)
# Formulas for mapping parameters in S1 parameterization to
# those in S0 parameterization can be found in [NO]. Note that
# only delta changes.
if alpha != 1:
delta0 = delta1 + beta * gamma * np.tan(np.pi * alpha / 2.0)
else:
delta0 = delta1 + 2 * beta * gamma * np.log(gamma) / np.pi
return alpha, beta, delta0, gamma
def _fitstart_S1(data):
# We follow McCullock 1986 method - Simple Consistent Estimators
# of Stable Distribution Parameters
# fmt: off
# Table III and IV
nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4,
5, 6, 8, 10, 15, 25]
nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
# table III - alpha = psi_1(nu_alpha, nu_beta)
alpha_table = np.array([
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
[1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
[1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
[1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
[1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
[1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
[1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
[1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
[1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
[1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
[0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
[0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
[0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
[0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]).T
# transpose because interpolation with `RectBivariateSpline` is with
# `nu_beta` as `x` and `nu_alpha` as `y`
# table IV - beta = psi_2(nu_alpha, nu_beta)
beta_table = np.array([
[0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
[0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
[0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
[0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
[0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
[0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
[0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
[0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
[0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
[0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
[0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
[0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
[0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
[0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
[0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]).T
# Table V and VII
# These are ordered with decreasing `alpha_range`; so we will need to
# reverse them as required by RectBivariateSpline.
alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1,
1, 0.9, 0.8, 0.7, 0.6, 0.5][::-1]
beta_range = [0, 0.25, 0.5, 0.75, 1]
# Table V - nu_c = psi_3(alpha, beta)
nu_c_table = np.array([
[1.908, 1.908, 1.908, 1.908, 1.908],
[1.914, 1.915, 1.916, 1.918, 1.921],
[1.921, 1.922, 1.927, 1.936, 1.947],
[1.927, 1.930, 1.943, 1.961, 1.987],
[1.933, 1.940, 1.962, 1.997, 2.043],
[1.939, 1.952, 1.988, 2.045, 2.116],
[1.946, 1.967, 2.022, 2.106, 2.211],
[1.955, 1.984, 2.067, 2.188, 2.333],
[1.965, 2.007, 2.125, 2.294, 2.491],
[1.980, 2.040, 2.205, 2.435, 2.696],
[2.000, 2.085, 2.311, 2.624, 2.973],
[2.040, 2.149, 2.461, 2.886, 3.356],
[2.098, 2.244, 2.676, 3.265, 3.912],
[2.189, 2.392, 3.004, 3.844, 4.775],
[2.337, 2.634, 3.542, 4.808, 6.247],
[2.588, 3.073, 4.534, 6.636, 9.144]])[::-1].T
# transpose because interpolation with `RectBivariateSpline` is with
# `beta` as `x` and `alpha` as `y`
# Table VII - nu_zeta = psi_5(alpha, beta)
nu_zeta_table = np.array([
[0, 0.000, 0.000, 0.000, 0.000],
[0, -0.017, -0.032, -0.049, -0.064],
[0, -0.030, -0.061, -0.092, -0.123],
[0, -0.043, -0.088, -0.132, -0.179],
[0, -0.056, -0.111, -0.170, -0.232],
[0, -0.066, -0.134, -0.206, -0.283],
[0, -0.075, -0.154, -0.241, -0.335],
[0, -0.084, -0.173, -0.276, -0.390],
[0, -0.090, -0.192, -0.310, -0.447],
[0, -0.095, -0.208, -0.346, -0.508],
[0, -0.098, -0.223, -0.380, -0.576],
[0, -0.099, -0.237, -0.424, -0.652],
[0, -0.096, -0.250, -0.469, -0.742],
[0, -0.089, -0.262, -0.520, -0.853],
[0, -0.078, -0.272, -0.581, -0.997],
[0, -0.061, -0.279, -0.659, -1.198]])[::-1].T
# fmt: on
psi_1 = RectBivariateSpline(nu_beta_range, nu_alpha_range,
alpha_table, kx=1, ky=1, s=0)
def psi_1_1(nu_beta, nu_alpha):
return psi_1(nu_beta, nu_alpha) \
if nu_beta > 0 else psi_1(-nu_beta, nu_alpha)
psi_2 = RectBivariateSpline(nu_beta_range, nu_alpha_range,
beta_table, kx=1, ky=1, s=0)
def psi_2_1(nu_beta, nu_alpha):
return psi_2(nu_beta, nu_alpha) \
if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
phi_3 = RectBivariateSpline(beta_range, alpha_range, nu_c_table,
kx=1, ky=1, s=0)
def phi_3_1(beta, alpha):
return phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
phi_5 = RectBivariateSpline(beta_range, alpha_range, nu_zeta_table,
kx=1, ky=1, s=0)
def phi_5_1(beta, alpha):
return phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
# quantiles
p05 = np.percentile(data, 5)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p25 = np.percentile(data, 25)
p75 = np.percentile(data, 75)
nu_alpha = (p95 - p05) / (p75 - p25)
nu_beta = (p95 + p05 - 2 * p50) / (p95 - p05)
if nu_alpha >= 2.439:
eps = np.finfo(float).eps
alpha = np.clip(psi_1_1(nu_beta, nu_alpha)[0, 0], eps, 2.)
beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0, 0], -1.0, 1.0)
else:
alpha = 2.0
beta = np.sign(nu_beta)
c = (p75 - p25) / phi_3_1(beta, alpha)[0, 0]
zeta = p50 + c * phi_5_1(beta, alpha)[0, 0]
delta = zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha != 1. else zeta
return (alpha, beta, delta, c)
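# Worked illustration of the quantile estimators above (hedged, approximate
# hand-computed values): for a standard Cauchy sample (alpha=1, beta=0) the
# population quantiles give
#     nu_alpha = (q95 - q05) / (q75 - q25) = tan(0.45 * pi) ~ 6.31
#     nu_beta  = (q95 + q05 - 2 * q50) / (q95 - q05) = 0
# and the (bi)linear interpolation of Table III then returns alpha close to
# 1, with Table IV giving beta ~ 0, as expected.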
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l, cauchy, norm
Notes
-----
The distribution for `levy_stable` has characteristic function:
.. math::
\varphi(t, \alpha, \beta, c, \mu) =
e^{it\mu -|ct|^{\alpha}(1-i\beta\operatorname{sign}(t)\Phi(\alpha, t))}
where two different parameterizations are supported. The first :math:`S_1`:
.. math::
\Phi = \begin{cases}
\tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |t|&\alpha =1
\end{cases}
The second :math:`S_0`:
.. math::
\Phi = \begin{cases}
-\tan \left({\frac {\pi \alpha }{2}}\right)(|ct|^{1-\alpha}-1)
&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |ct|&\alpha =1
\end{cases}
The probability density function for `levy_stable` is:
.. math::
f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
where :math:`-\infty < t < \infty`. This integral does not have a known
closed form.
`levy_stable` generalizes several distributions. Where possible, they
should be used instead. Specifically, when the shape parameters
assume the values in the table below, the corresponding equivalent
distribution should be used.
========= ======== ===========
``alpha`` ``beta`` Equivalent
========= ======== ===========
1/2 -1 `levy_l`
1/2 1 `levy`
1 0 `cauchy`
2 any `norm` (with ``scale=sqrt(2)``)
========= ======== ===========
Evaluation of the pdf uses Nolan's piecewise integration approach with the
Zolotarev :math:`M` parameterization by default. There is also the option
to use direct numerical integration of the standard parameterization of the
characteristic function or to evaluate by taking the FFT of the
characteristic function.
    The default method can be changed by setting the class variable
``levy_stable.pdf_default_method`` to one of 'piecewise' for Nolan's
approach, 'dni' for direct numerical integration, or 'fft-simpson' for the
FFT based approach. For the sake of backwards compatibility, the methods
'best' and 'zolotarev' are equivalent to 'piecewise' and the method
'quadrature' is equivalent to 'dni'.
The parameterization can be changed by setting the class variable
``levy_stable.parameterization`` to either 'S0' or 'S1'.
The default is 'S1'.
To improve performance of piecewise and direct numerical integration one
can specify ``levy_stable.quad_eps`` (defaults to 1.2e-14). This is used
as both the absolute and relative quadrature tolerance for direct numerical
integration and as the relative quadrature tolerance for the piecewise
method. One can also specify ``levy_stable.piecewise_x_tol_near_zeta``
(defaults to 0.005) for how close x is to zeta before it is considered the
    same as zeta [NO]. The exact check is
``abs(x0 - zeta) < piecewise_x_tol_near_zeta*alpha**(1/alpha)``. One can
also specify ``levy_stable.piecewise_alpha_tol_near_one`` (defaults to
0.005) for how close alpha is to 1 before being considered equal to 1.
To increase accuracy of FFT calculation one can specify
``levy_stable.pdf_fft_grid_spacing`` (defaults to 0.001) and
``pdf_fft_n_points_two_power`` (defaults to None which means a value is
calculated that sufficiently covers the input range).
Further control over FFT calculation is available by setting
``pdf_fft_interpolation_degree`` (defaults to 3) for spline order and
``pdf_fft_interpolation_level`` for determining the number of points to use
in the Newton-Cotes formula when approximating the characteristic function
(considered experimental).
Evaluation of the cdf uses Nolan's piecewise integration approach with the
    Zolotarev :math:`S_0` parameterization by default. There is also the option
to evaluate through integration of an interpolated spline of the pdf
calculated by means of the FFT method. The settings affecting FFT
calculation are the same as for pdf calculation. The default cdf method can
be changed by setting ``levy_stable.cdf_default_method`` to either
    'piecewise' or 'fft-simpson'. For cdf calculations the Zolotarev method is
superior in accuracy, so FFT is disabled by default.
Fitting estimate uses quantile estimation method in [MC]. MLE estimation of
parameters in fit method uses this quantile estimate initially. Note that
MLE doesn't always converge if using FFT for pdf calculations; this will be
the case if alpha <= 1 where the FFT approach doesn't give good
approximations.
Any non-missing value for the attribute
``levy_stable.pdf_fft_min_points_threshold`` will set
``levy_stable.pdf_default_method`` to 'fft-simpson' if a valid
default method is not otherwise set.
.. warning::
For pdf calculations FFT calculation is considered experimental.
For cdf calculations FFT calculation is considered experimental. Use
        Zolotarev's method instead (default).
%(after_notes)s
References
----------
.. [MC] McCulloch, J., 1986. Simple consistent estimators of stable
distribution parameters. Communications in Statistics - Simulation and
        Computation 15, 1109-1136.
.. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method
to compute densities of stable distribution.
.. [NO] Nolan, J., 1997. Numerical Calculation of Stable Densities and
distributions Functions.
.. [HO] Hopcraft, K. I., Jakeman, E., Tanner, R. M. J., 1999. Lévy random
walks with fluctuating step number and multiscale behavior.
%(example)s
"""
# Configurable options as class variables
    # (accessible from self by attribute lookup).
parameterization = "S1"
pdf_default_method = "piecewise"
cdf_default_method = "piecewise"
quad_eps = _QUAD_EPS
piecewise_x_tol_near_zeta = 0.005
piecewise_alpha_tol_near_one = 0.005
pdf_fft_min_points_threshold = None
pdf_fft_grid_spacing = 0.001
pdf_fft_n_points_two_power = None
pdf_fft_interpolation_level = 3
pdf_fft_interpolation_degree = 3
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _shape_info(self):
ialpha = _ShapeInfo("alpha", False, (0, 2), (False, True))
ibeta = _ShapeInfo("beta", False, (-1, 1), (True, True))
return [ialpha, ibeta]
def _parameterization(self):
allowed = ("S0", "S1")
pz = self.parameterization
if pz not in allowed:
raise RuntimeError(
f"Parameterization '{pz}' in supported list: {allowed}"
)
return pz
@inherit_docstring_from(rv_continuous)
def rvs(self, *args, **kwds):
X1 = super().rvs(*args, **kwds)
discrete = kwds.pop("discrete", None) # noqa
rndm = kwds.pop("random_state", None) # noqa
(alpha, beta), delta, gamma, size = self._parse_args_rvs(*args, **kwds)
# shift location for this parameterisation (S1)
X1 = np.where(
alpha == 1.0, X1 + 2 * beta * gamma * np.log(gamma) / np.pi, X1
)
if self._parameterization() == "S0":
return np.where(
alpha == 1.0,
X1 - (beta * 2 * gamma * np.log(gamma) / np.pi),
X1 - gamma * beta * np.tan(np.pi * alpha / 2.0),
)
elif self._parameterization() == "S1":
return X1
def _rvs(self, alpha, beta, size=None, random_state=None):
return _rvs_Z1(alpha, beta, size, random_state)
@inherit_docstring_from(rv_continuous)
def pdf(self, x, *args, **kwds):
# override base class version to correct
# location for S1 parameterization
if self._parameterization() == "S0":
return super().pdf(x, *args, **kwds)
elif self._parameterization() == "S1":
(alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
return super().pdf(x, *args, **kwds)
else:
# correct location for this parameterisation
x = np.reshape(x, (1, -1))[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
_alpha, _beta = pair
_delta = (
delta + 2 * _beta * gamma * np.log(gamma) / np.pi
if _alpha == 1.0
else delta
)
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
_x = data_in[data_mask, 0]
data_out[data_mask] = (
super()
.pdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
.reshape(len(_x), 1)
)
output = data_out.T[0]
if output.shape == (1,):
return output[0]
return output
def _pdf(self, x, alpha, beta):
if self._parameterization() == "S0":
_pdf_single_value_piecewise = _pdf_single_value_piecewise_Z0
_pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z0
_cf = _cf_Z0
elif self._parameterization() == "S1":
_pdf_single_value_piecewise = _pdf_single_value_piecewise_Z1
_pdf_single_value_cf_integrate = _pdf_single_value_cf_integrate_Z1
_cf = _cf_Z1
x = np.asarray(x).reshape(1, -1)[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
pdf_default_method_name = levy_stable_gen.pdf_default_method
if pdf_default_method_name in ("piecewise", "best", "zolotarev"):
pdf_single_value_method = _pdf_single_value_piecewise
elif pdf_default_method_name in ("dni", "quadrature"):
pdf_single_value_method = _pdf_single_value_cf_integrate
elif (
pdf_default_method_name == "fft-simpson"
or self.pdf_fft_min_points_threshold is not None
):
pdf_single_value_method = None
pdf_single_value_kwds = {
"quad_eps": self.quad_eps,
"piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta,
"piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one,
}
fft_grid_spacing = self.pdf_fft_grid_spacing
fft_n_points_two_power = self.pdf_fft_n_points_two_power
fft_interpolation_level = self.pdf_fft_interpolation_level
fft_interpolation_degree = self.pdf_fft_interpolation_degree
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if pdf_single_value_method is not None:
data_out[data_mask] = np.array(
[
pdf_single_value_method(
_x, _alpha, _beta, **pdf_single_value_kwds
)
for _x, _alpha, _beta in data_subset
]
).reshape(len(data_subset), 1)
else:
warnings.warn(
"Density calculations experimental for FFT method."
+ " Use combination of piecewise and dni methods instead.",
RuntimeWarning,
)
_alpha, _beta = pair
_x = data_subset[:, (0,)]
if _alpha < 1.0:
raise RuntimeError(
"FFT method does not work well for alpha less than 1."
)
# need enough points to "cover" _x for interpolation
if fft_grid_spacing is None and fft_n_points_two_power is None:
raise ValueError(
"One of fft_grid_spacing or fft_n_points_two_power "
+ "needs to be set."
)
max_abs_x = np.max(np.abs(_x))
h = (
2 ** (3 - fft_n_points_two_power) * max_abs_x
if fft_grid_spacing is None
else fft_grid_spacing
)
q = (
np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2
if fft_n_points_two_power is None
else int(fft_n_points_two_power)
)
# for some parameters, the range of x can be quite
# large, let's choose an arbitrary cut off (8GB) to save on
# computer memory.
MAX_Q = 30
if q > MAX_Q:
raise RuntimeError(
"fft_n_points_two_power has a maximum "
+ f"value of {MAX_Q}"
)
density_x, density = pdf_from_cf_with_fft(
lambda t: _cf(t, _alpha, _beta),
h=h,
q=q,
level=fft_interpolation_level,
)
f = interpolate.InterpolatedUnivariateSpline(
density_x, np.real(density), k=fft_interpolation_degree
) # patch FFT to use cubic
data_out[data_mask] = f(_x)
return data_out.T[0]
@inherit_docstring_from(rv_continuous)
def cdf(self, x, *args, **kwds):
# override base class version to correct
# location for S1 parameterization
# NOTE: this is near identical to pdf() above
if self._parameterization() == "S0":
return super().cdf(x, *args, **kwds)
elif self._parameterization() == "S1":
(alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
return super().cdf(x, *args, **kwds)
else:
# correct location for this parameterisation
x = np.reshape(x, (1, -1))[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
_alpha, _beta = pair
_delta = (
delta + 2 * _beta * gamma * np.log(gamma) / np.pi
if _alpha == 1.0
else delta
)
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
_x = data_in[data_mask, 0]
data_out[data_mask] = (
super()
.cdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
.reshape(len(_x), 1)
)
output = data_out.T[0]
if output.shape == (1,):
return output[0]
return output
def _cdf(self, x, alpha, beta):
if self._parameterization() == "S0":
_cdf_single_value_piecewise = _cdf_single_value_piecewise_Z0
_cf = _cf_Z0
elif self._parameterization() == "S1":
_cdf_single_value_piecewise = _cdf_single_value_piecewise_Z1
_cf = _cf_Z1
x = np.asarray(x).reshape(1, -1)[0, :]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in), 1))
cdf_default_method_name = self.cdf_default_method
if cdf_default_method_name == "piecewise":
cdf_single_value_method = _cdf_single_value_piecewise
elif cdf_default_method_name == "fft-simpson":
cdf_single_value_method = None
cdf_single_value_kwds = {
"quad_eps": self.quad_eps,
"piecewise_x_tol_near_zeta": self.piecewise_x_tol_near_zeta,
"piecewise_alpha_tol_near_one": self.piecewise_alpha_tol_near_one,
}
fft_grid_spacing = self.pdf_fft_grid_spacing
fft_n_points_two_power = self.pdf_fft_n_points_two_power
fft_interpolation_level = self.pdf_fft_interpolation_level
fft_interpolation_degree = self.pdf_fft_interpolation_degree
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if cdf_single_value_method is not None:
data_out[data_mask] = np.array(
[
cdf_single_value_method(
_x, _alpha, _beta, **cdf_single_value_kwds
)
for _x, _alpha, _beta in data_subset
]
).reshape(len(data_subset), 1)
else:
warnings.warn(
"Cumulative density calculations experimental for FFT"
+ " method. Use piecewise method instead.",
RuntimeWarning,
)
_alpha, _beta = pair
_x = data_subset[:, (0,)]
# need enough points to "cover" _x for interpolation
if fft_grid_spacing is None and fft_n_points_two_power is None:
raise ValueError(
"One of fft_grid_spacing or fft_n_points_two_power "
+ "needs to be set."
)
max_abs_x = np.max(np.abs(_x))
h = (
2 ** (3 - fft_n_points_two_power) * max_abs_x
if fft_grid_spacing is None
else fft_grid_spacing
)
q = (
np.ceil(np.log(2 * max_abs_x / h) / np.log(2)) + 2
if fft_n_points_two_power is None
else int(fft_n_points_two_power)
)
density_x, density = pdf_from_cf_with_fft(
lambda t: _cf(t, _alpha, _beta),
h=h,
q=q,
level=fft_interpolation_level,
)
f = interpolate.InterpolatedUnivariateSpline(
density_x, np.real(density), k=fft_interpolation_degree
)
data_out[data_mask] = np.array(
[f.integral(self.a, x_1) for x_1 in _x]
).reshape(data_out[data_mask].shape)
return data_out.T[0]
def _fitstart(self, data):
if self._parameterization() == "S0":
_fitstart = _fitstart_S0
elif self._parameterization() == "S1":
_fitstart = _fitstart_S1
return _fitstart(data)
def _stats(self, alpha, beta):
mu = 0 if alpha > 1 else np.nan
mu2 = 2 if alpha == 2 else np.inf
        g1 = 0.0 if alpha == 2.0 else np.nan
        g2 = 0.0 if alpha == 2.0 else np.nan
return mu, mu2, g1, g2
# cotes numbers - see sequence from http://oeis.org/A100642
Cotes_table = np.array(
[[], [1]] + [v[2] for v in _builtincoeffs.values()], dtype=object
)
Cotes = np.array(
[
np.pad(r, (0, len(Cotes_table) - 1 - len(r)), mode='constant')
for r in Cotes_table
]
)
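# Descriptive note: Cotes_table[n] holds the numerator weights of the
# n-point Newton-Cotes rule (taken from scipy.integrate's built-in
# coefficient table imported above); Cotes zero-pads the rows into a
# rectangular array so Cotes[n, i] can be indexed vectorially inside
# pdf_from_cf_with_fft, where the weights are re-normalized by sum(Cotes[n]).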
def pdf_from_cf_with_fft(cf, h=0.01, q=9, level=3):
"""Calculates pdf from characteristic function.
Uses fast Fourier transform with Newton-Cotes integration following [WZ].
Defaults to using Simpson's method (3-point Newton-Cotes integration).
Parameters
----------
cf : callable
Single argument function from float -> complex expressing a
characteristic function for some distribution.
h : Optional[float]
Step size for Newton-Cotes integration. Default: 0.01
q : Optional[int]
        Use 2**q steps when performing Newton-Cotes integration.
The infinite integral in the inverse Fourier transform will then
be restricted to the interval [-2**q * h / 2, 2**q * h / 2]. Setting
the number of steps equal to a power of 2 allows the fft to be
calculated in O(n*log(n)) time rather than O(n**2).
Default: 9
level : Optional[int]
Calculate integral using n-point Newton-Cotes integration for
n = level. The 3-point Newton-Cotes formula corresponds to Simpson's
rule. Default: 3
Returns
-------
x_l : ndarray
Array of points x at which pdf is estimated. 2**q equally spaced
points from -pi/h up to but not including pi/h.
density : ndarray
Estimated values of pdf corresponding to cf at points in x_l.
References
----------
.. [WZ] Wang, Li and Zhang, Ji-Hong, 2008. Simpson's rule based FFT method
to compute densities of stable distribution.
"""
n = level
N = 2**q
steps = np.arange(0, N)
L = N * h / 2
x_l = np.pi * (steps - N / 2) / L
if level > 1:
indices = np.arange(n).reshape(n, 1)
s1 = np.sum(
(-1) ** steps * Cotes[n, indices] * np.fft.fft(
(-1)**steps * cf(-L + h * steps + h * indices / (n - 1))
) * np.exp(
1j * np.pi * indices / (n - 1)
- 2 * 1j * np.pi * indices * steps /
(N * (n - 1))
),
axis=0
)
else:
s1 = (-1) ** steps * Cotes[n, 0] * np.fft.fft(
(-1) ** steps * cf(-L + h * steps)
)
density = h * s1 / (2 * np.pi * np.sum(Cotes[n]))
return (x_l, density)
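# A minimal usage sketch for pdf_from_cf_with_fft (illustrative comment, not
# executed at import; values are approximate): feeding the standard normal
# characteristic function should roughly recover the normal density on the
# returned grid, e.g.
#
#     x, dens = pdf_from_cf_with_fft(lambda t: np.exp(-t ** 2 / 2),
#                                    h=0.1, q=9)
#     # np.real(dens)[len(x) // 2] is then close to
#     # 1 / np.sqrt(2 * np.pi) ~ 0.3989 at x = 0.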
levy_stable = levy_stable_gen(name="levy_stable")
| bsd-3-clause | 0ee8e691fff1002d2ba97c47238fc3b7 | 35.546667 | 79 | 0.536415 | 3.226367 | false | false | false | false |
scipy/scipy | scipy/optimize/_trustregion_constr/equality_constrained_sqp.py | 27 | 8592 | """Byrd-Omojokun Trust-Region SQP method."""
from scipy.sparse import eye as speye
from .projections import projections
from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
import numpy as np
from numpy.linalg import norm
__all__ = ['equality_constrained_sqp']
def default_scaling(x):
n, = np.shape(x)
return speye(n)
def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
x0, fun0, grad0, constr0,
jac0, stop_criteria,
state,
initial_penalty,
initial_trust_radius,
factorization_method,
trust_lb=None,
trust_ub=None,
scaling=default_scaling):
"""Solve nonlinear equality-constrained problem using trust-region SQP.
Solve optimization problem:
minimize fun(x)
subject to: constr(x) = 0
    using the Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
implementation details are based on [2]_ and [3]_, p. 549.
References
----------
.. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
implementation of an algorithm for large-scale equality
constrained optimization." SIAM Journal on
Optimization 8.3 (1998): 682-706.
.. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
"An interior point algorithm for large-scale nonlinear
programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
Second Edition (2006).
"""
PENALTY_FACTOR = 0.3 # Rho from formula (3.51), reference [2]_, p.891.
LARGE_REDUCTION_RATIO = 0.9
INTERMEDIARY_REDUCTION_RATIO = 0.3
SUFFICIENT_REDUCTION_RATIO = 1e-8 # Eta from reference [2]_, p.892.
TRUST_ENLARGEMENT_FACTOR_L = 7.0
TRUST_ENLARGEMENT_FACTOR_S = 2.0
MAX_TRUST_REDUCTION = 0.5
MIN_TRUST_REDUCTION = 0.1
SOC_THRESHOLD = 0.1
TR_FACTOR = 0.8 # Zeta from formula (3.21), reference [2]_, p.885.
BOX_FACTOR = 0.5
n, = np.shape(x0) # Number of parameters
# Set default lower and upper bounds.
if trust_lb is None:
trust_lb = np.full(n, -np.inf)
if trust_ub is None:
trust_ub = np.full(n, np.inf)
# Initial values
x = np.copy(x0)
trust_radius = initial_trust_radius
penalty = initial_penalty
# Compute Values
f = fun0
c = grad0
b = constr0
A = jac0
S = scaling(x)
# Get projections
Z, LS, Y = projections(A, factorization_method)
# Compute least-square lagrange multipliers
v = -LS.dot(c)
# Compute Hessian
H = lagr_hess(x, v)
# Update state parameters
optimality = norm(c + A.T.dot(v), np.inf)
constr_violation = norm(b, np.inf) if len(b) > 0 else 0
cg_info = {'niter': 0, 'stop_cond': 0,
'hits_boundary': False}
last_iteration_failed = False
while not stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
trust_radius, penalty, cg_info):
# Normal Step - `dn`
# minimize 1/2*||A dn + b||^2
# subject to:
# ||dn|| <= TR_FACTOR * trust_radius
# BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
dn = modified_dogleg(A, Y, b,
TR_FACTOR*trust_radius,
BOX_FACTOR*trust_lb,
BOX_FACTOR*trust_ub)
# Tangential Step - `dt`
# Solve the QP problem:
# minimize 1/2 dt.T H dt + dt.T (H dn + c)
# subject to:
# A dt = 0
# ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
# lb - dn <= dt <= ub - dn
c_t = H.dot(dn) + c
b_t = np.zeros_like(b)
trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
lb_t = trust_lb - dn
ub_t = trust_ub - dn
dt, cg_info = projected_cg(H, c_t, Z, Y, b_t,
trust_radius_t,
lb_t, ub_t)
# Compute update (normal + tangential steps).
d = dn + dt
# Compute second order model: 1/2 d H d + c.T d + f.
quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
# Compute linearized constraint: l = A d + b.
linearized_constr = A.dot(d)+b
# Compute new penalty parameter according to formula (3.52),
# reference [2]_, p.891.
vpred = norm(b) - norm(linearized_constr)
# Guarantee `vpred` always positive,
# regardless of roundoff errors.
vpred = max(1e-16, vpred)
previous_penalty = penalty
if quadratic_model > 0:
new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
penalty = max(penalty, new_penalty)
# Compute predicted reduction according to formula (3.52),
# reference [2]_, p.891.
predicted_reduction = -quadratic_model + penalty*vpred
# Compute merit function at current point
merit_function = f + penalty*norm(b)
# Evaluate function and constraints at trial point
x_next = x + S.dot(d)
f_next, b_next = fun_and_constr(x_next)
# Compute merit function at trial point
merit_function_next = f_next + penalty*norm(b_next)
# Compute actual reduction according to formula (3.54),
# reference [2]_, p.892.
actual_reduction = merit_function - merit_function_next
# Compute reduction ratio
reduction_ratio = actual_reduction / predicted_reduction
# Second order correction (SOC), reference [2]_, p.892.
if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
norm(dn) <= SOC_THRESHOLD * norm(dt):
# Compute second order correction
y = -Y.dot(b_next)
# Make sure increment is inside box constraints
_, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
# Compute tentative point
x_soc = x + S.dot(d + t*y)
f_soc, b_soc = fun_and_constr(x_soc)
# Recompute actual reduction
merit_function_soc = f_soc + penalty*norm(b_soc)
actual_reduction_soc = merit_function - merit_function_soc
# Recompute reduction ratio
reduction_ratio_soc = actual_reduction_soc / predicted_reduction
if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
x_next = x_soc
f_next = f_soc
b_next = b_soc
reduction_ratio = reduction_ratio_soc
# Readjust trust region step, formula (3.55), reference [2]_, p.892.
if reduction_ratio >= LARGE_REDUCTION_RATIO:
trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
trust_radius)
elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
trust_radius)
# Reduce trust region step, according to reference [3]_, p.696.
elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) /
(1-reduction_ratio))
new_trust_radius = trust_reduction * norm(d)
if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
trust_radius *= MAX_TRUST_REDUCTION
elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
trust_radius = new_trust_radius
else:
trust_radius *= MIN_TRUST_REDUCTION
# Update iteration
if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
x = x_next
f, b = f_next, b_next
c, A = grad_and_jac(x)
S = scaling(x)
# Get projections
Z, LS, Y = projections(A, factorization_method)
# Compute least-square lagrange multipliers
v = -LS.dot(c)
# Compute Hessian
H = lagr_hess(x, v)
# Set Flag
last_iteration_failed = False
            # Optimality values
optimality = norm(c + A.T.dot(v), np.inf)
constr_violation = norm(b, np.inf) if len(b) > 0 else 0
else:
penalty = previous_penalty
last_iteration_failed = True
return x, state
| bsd-3-clause | f8b4b316d9e6e25f76ca7f522f251b2b | 38.59447 | 79 | 0.554004 | 3.544554 | false | false | false | false |
scipy/scipy | scipy/optimize/_linprog_ip.py | 11 | 45914 | """Interior-point method for linear programming
The *interior-point* method uses the primal-dual path following algorithm
outlined in [1]_. This algorithm supports sparse constraint matrices and
is typically faster than the simplex methods, especially for large, sparse
problems. Note, however, that the solution returned may be slightly less
accurate than those of the simplex methods and will not, in general,
correspond with a vertex of the polytope defined by the constraints.
.. versionadded:: 1.0.0
References
----------
.. [1] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
# Author: Matt Haberland
import numpy as np
import scipy as sp
import scipy.sparse as sps
from warnings import warn
from scipy.linalg import LinAlgError
from ._optimize import OptimizeWarning, OptimizeResult, _check_unknown_options
from ._linprog_util import _postsolve
has_umfpack = True
has_cholmod = True
try:
import sksparse
from sksparse.cholmod import cholesky as cholmod
from sksparse.cholmod import analyze as cholmod_analyze
except ImportError:
has_cholmod = False
try:
import scikits.umfpack # test whether to use factorized
except ImportError:
has_umfpack = False
def _get_solver(M, sparse=False, lstsq=False, sym_pos=True,
cholesky=True, permc_spec='MMD_AT_PLUS_A'):
"""
Given solver options, return a handle to the appropriate linear system
solver.
Parameters
----------
M : 2-D array
As defined in [4] Equation 8.31
sparse : bool (default = False)
True if the system to be solved is sparse. This is typically set
True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
lstsq : bool (default = False)
True if the system is ill-conditioned and/or (nearly) singular and
thus a more robust least-squares solver is desired. This is sometimes
needed as the solution is approached.
sym_pos : bool (default = True)
        True if the system matrix is symmetric positive definite.
Sometimes this needs to be set false as the solution is approached,
even when the system should be symmetric positive definite, due to
numerical difficulties.
cholesky : bool (default = True)
True if the system is to be solved by Cholesky, rather than LU,
decomposition. This is typically faster unless the problem is very
small or prone to numerical difficulties.
permc_spec : str (default = 'MMD_AT_PLUS_A')
Sparsity preservation strategy used by SuperLU. Acceptable values are:
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering.
See SuperLU documentation.
Returns
-------
solve : function
Handle to the appropriate solver function
"""
try:
if sparse:
if lstsq:
def solve(r, sym_pos=False):
return sps.linalg.lsqr(M, r)[0]
elif cholesky:
try:
# Will raise an exception in the first call,
# or when the matrix changes due to a new problem
_get_solver.cholmod_factor.cholesky_inplace(M)
except Exception:
_get_solver.cholmod_factor = cholmod_analyze(M)
_get_solver.cholmod_factor.cholesky_inplace(M)
solve = _get_solver.cholmod_factor
else:
if has_umfpack and sym_pos:
solve = sps.linalg.factorized(M)
else: # factorized doesn't pass permc_spec
solve = sps.linalg.splu(M, permc_spec=permc_spec).solve
else:
if lstsq: # sometimes necessary as solution is approached
def solve(r):
return sp.linalg.lstsq(M, r)[0]
elif cholesky:
L = sp.linalg.cho_factor(M)
def solve(r):
return sp.linalg.cho_solve(L, r)
else:
# this seems to cache the matrix factorization, so solving
# with multiple right hand sides is much faster
def solve(r, sym_pos=sym_pos):
if sym_pos:
return sp.linalg.solve(M, r, assume_a="pos")
else:
return sp.linalg.solve(M, r)
# There are many things that can go wrong here, and it's hard to say
# what all of them are. It doesn't really matter: if the matrix can't be
# factorized, return None. get_solver will be called again with different
# inputs, and a new routine will try to factorize the matrix.
except KeyboardInterrupt:
raise
except Exception:
return None
return solve
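# Typical use of the returned handle (illustrative comment): for the normal
# equations M @ v = r assembled in _get_delta below,
#
#     solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec)
#     v = solve(r)
#
# and if factorization fails _get_solver returns None, so the caller can
# rebuild the solver with more robust options (e.g. sym_pos=False or
# lstsq=True).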
def _get_delta(A, b, c, x, y, z, tau, kappa, gamma, eta, sparse=False,
lstsq=False, sym_pos=True, cholesky=True, pc=True, ip=False,
permc_spec='MMD_AT_PLUS_A'):
"""
Given standard form problem defined by ``A``, ``b``, and ``c``;
current variable estimates ``x``, ``y``, ``z``, ``tau``, and ``kappa``;
    algorithmic parameters ``gamma`` and ``eta``;
and options ``sparse``, ``lstsq``, ``sym_pos``, ``cholesky``, ``pc``
(predictor-corrector), and ``ip`` (initial point improvement),
get the search direction for increments to the variable estimates.
Parameters
----------
As defined in [4], except:
sparse : bool
True if the system to be solved is sparse. This is typically set
True when the original ``A_ub`` and ``A_eq`` arrays are sparse.
lstsq : bool
True if the system is ill-conditioned and/or (nearly) singular and
thus a more robust least-squares solver is desired. This is sometimes
needed as the solution is approached.
sym_pos : bool
        True if the system matrix is symmetric positive definite.
Sometimes this needs to be set false as the solution is approached,
even when the system should be symmetric positive definite, due to
numerical difficulties.
cholesky : bool
True if the system is to be solved by Cholesky, rather than LU,
decomposition. This is typically faster unless the problem is very
small or prone to numerical difficulties.
pc : bool
        True if the predictor-corrector method of Mehrotra is to be used. This
is almost always (if not always) beneficial. Even though it requires
the solution of an additional linear system, the factorization
is typically (implicitly) reused so solution is efficient, and the
number of algorithm iterations is typically reduced.
ip : bool
True if the improved initial point suggestion due to [4] section 4.3
is desired. It's unclear whether this is beneficial.
permc_spec : str (default = 'MMD_AT_PLUS_A')
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
True``.) A matrix is factorized in each iteration of the algorithm.
This option specifies how to permute the columns of the matrix for
sparsity preservation. Acceptable values are:
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering.
This option can impact the convergence of the
interior point algorithm; test different values to determine which
performs best for your problem. For more information, refer to
``scipy.sparse.linalg.splu``.
Returns
-------
Search directions as defined in [4]
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
if A.shape[0] == 0:
# If there are no constraints, some solvers fail (understandably)
# rather than returning empty solution. This gets the job done.
sparse, lstsq, sym_pos, cholesky = False, False, True, False
n_x = len(x)
# [4] Equation 8.8
r_P = b * tau - A.dot(x)
r_D = c * tau - A.T.dot(y) - z
r_G = c.dot(x) - b.transpose().dot(y) + kappa
mu = (x.dot(z) + tau * kappa) / (n_x + 1)
# Assemble M from [4] Equation 8.31
Dinv = x / z
if sparse:
M = A.dot(sps.diags(Dinv, 0, format="csc").dot(A.T))
else:
M = A.dot(Dinv.reshape(-1, 1) * A.T)
solve = _get_solver(M, sparse, lstsq, sym_pos, cholesky, permc_spec)
# pc: "predictor-corrector" [4] Section 4.1
# In development this option could be turned off
# but it always seems to improve performance substantially
n_corrections = 1 if pc else 0
i = 0
alpha, d_x, d_z, d_tau, d_kappa = 0, 0, 0, 0, 0
while i <= n_corrections:
# Reference [4] Eq. 8.6
rhatp = eta(gamma) * r_P
rhatd = eta(gamma) * r_D
rhatg = eta(gamma) * r_G
# Reference [4] Eq. 8.7
rhatxs = gamma * mu - x * z
rhattk = gamma * mu - tau * kappa
if i == 1:
if ip: # if the correction is to get "initial point"
# Reference [4] Eq. 8.23
rhatxs = ((1 - alpha) * gamma * mu -
x * z - alpha**2 * d_x * d_z)
rhattk = ((1 - alpha) * gamma * mu -
tau * kappa -
alpha**2 * d_tau * d_kappa)
else: # if the correction is for "predictor-corrector"
# Reference [4] Eq. 8.13
rhatxs -= d_x * d_z
rhattk -= d_tau * d_kappa
# sometimes numerical difficulties arise as the solution is approached
# this loop tries to solve the equations using a sequence of functions
# for solve. For dense systems, the order is:
# 1. scipy.linalg.cho_factor/scipy.linalg.cho_solve,
# 2. scipy.linalg.solve w/ sym_pos = True,
# 3. scipy.linalg.solve w/ sym_pos = False, and if all else fails
# 4. scipy.linalg.lstsq
# For sparse systems, the order is:
# 1. sksparse.cholmod.cholesky (if available)
# 2. scipy.sparse.linalg.factorized (if umfpack available)
# 3. scipy.sparse.linalg.splu
# 4. scipy.sparse.linalg.lsqr
solved = False
        while not solved:
try:
# [4] Equation 8.28
p, q = _sym_solve(Dinv, A, c, b, solve)
# [4] Equation 8.29
u, v = _sym_solve(Dinv, A, rhatd -
(1 / x) * rhatxs, rhatp, solve)
if np.any(np.isnan(p)) or np.any(np.isnan(q)):
raise LinAlgError
solved = True
except (LinAlgError, ValueError, TypeError) as e:
# Usually this doesn't happen. If it does, it happens when
# there are redundant constraints or when approaching the
# solution. If so, change solver.
if cholesky:
cholesky = False
warn(
"Solving system with option 'cholesky':True "
"failed. It is normal for this to happen "
"occasionally, especially as the solution is "
"approached. However, if you see this frequently, "
"consider setting option 'cholesky' to False.",
OptimizeWarning, stacklevel=5)
elif sym_pos:
sym_pos = False
warn(
"Solving system with option 'sym_pos':True "
"failed. It is normal for this to happen "
"occasionally, especially as the solution is "
"approached. However, if you see this frequently, "
"consider setting option 'sym_pos' to False.",
OptimizeWarning, stacklevel=5)
elif not lstsq:
lstsq = True
warn(
"Solving system with option 'sym_pos':False "
"failed. This may happen occasionally, "
"especially as the solution is "
"approached. However, if you see this frequently, "
"your problem may be numerically challenging. "
"If you cannot improve the formulation, consider "
"setting 'lstsq' to True. Consider also setting "
"`presolve` to True, if it is not already.",
OptimizeWarning, stacklevel=5)
else:
raise e
solve = _get_solver(M, sparse, lstsq, sym_pos,
cholesky, permc_spec)
# [4] Results after 8.29
d_tau = ((rhatg + 1 / tau * rhattk - (-c.dot(u) + b.dot(v))) /
(1 / tau * kappa + (-c.dot(p) + b.dot(q))))
d_x = u + p * d_tau
d_y = v + q * d_tau
# [4] Relations between after 8.25 and 8.26
d_z = (1 / x) * (rhatxs - z * d_x)
d_kappa = 1 / tau * (rhattk - kappa * d_tau)
# [4] 8.12 and "Let alpha be the maximal possible step..." before 8.23
alpha = _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, 1)
if ip: # initial point - see [4] 4.4
gamma = 10
else: # predictor-corrector, [4] definition after 8.12
beta1 = 0.1 # [4] pg. 220 (Table 8.1)
gamma = (1 - alpha)**2 * min(beta1, (1 - alpha))
i += 1
return d_x, d_y, d_z, d_tau, d_kappa
def _sym_solve(Dinv, A, r1, r2, solve):
"""
An implementation of [4] equation 8.31 and 8.32
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
# [4] 8.31
r = r2 + A.dot(Dinv * r1)
v = solve(r)
# [4] 8.32
u = Dinv * (A.T.dot(v) - r1)
return u, v
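# Derivation note for _sym_solve (restating [4] 8.31-8.32): the routine
# eliminates u from the block system
#     A.T @ v - diag(z / x) @ u = r1
#     A @ u = r2
# which yields the normal equations M @ v = r2 + A @ (Dinv * r1) with
# M = A @ diag(x / z) @ A.T (assembled by the caller), and then
# back-substitutes u = Dinv * (A.T @ v - r1).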
def _get_step(x, d_x, z, d_z, tau, d_tau, kappa, d_kappa, alpha0):
"""
An implementation of [4] equation 8.21
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
# [4] 4.3 Equation 8.21, ignoring 8.20 requirement
# same step is taken in primal and dual spaces
# alpha0 is basically beta3 from [4] Table 8.1, but instead of beta3
    # the value 1 is used in Mehrotra corrector and initial point correction
i_x = d_x < 0
i_z = d_z < 0
alpha_x = alpha0 * np.min(x[i_x] / -d_x[i_x]) if np.any(i_x) else 1
alpha_tau = alpha0 * tau / -d_tau if d_tau < 0 else 1
alpha_z = alpha0 * np.min(z[i_z] / -d_z[i_z]) if np.any(i_z) else 1
alpha_kappa = alpha0 * kappa / -d_kappa if d_kappa < 0 else 1
alpha = np.min([1, alpha_x, alpha_tau, alpha_z, alpha_kappa])
return alpha
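# Worked example for _get_step (hedged, hand-computed): with x=[1.],
# d_x=[-2.], z=[1.], d_z=[1.], tau=1, d_tau=-0.5, kappa=1, d_kappa=1 and
# alpha0=1, the candidate ratios are alpha_x=0.5, alpha_tau=2, alpha_z=1,
# alpha_kappa=1, so alpha=0.5: the largest fraction of the step keeping the
# nonnegative variables at or inside their bounds (with alpha0 < 1 the
# iterates stay strictly interior).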
def _get_message(status):
"""
Given problem status code, return a more detailed message.
Parameters
----------
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
Returns
-------
message : str
A string descriptor of the exit status of the optimization.
"""
messages = (
["Optimization terminated successfully.",
"The iteration limit was reached before the algorithm converged.",
"The algorithm terminated successfully and determined that the "
"problem is infeasible.",
"The algorithm terminated successfully and determined that the "
"problem is unbounded.",
"Numerical difficulties were encountered before the problem "
"converged. Please check your problem formulation for errors, "
"independence of linear equality constraints, and reasonable "
"scaling and matrix condition numbers. If you continue to "
"encounter this error, please submit a bug report."
])
return messages[status]
def _do_step(x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha):
"""
An implementation of [4] Equation 8.9
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
x = x + alpha * d_x
tau = tau + alpha * d_tau
z = z + alpha * d_z
kappa = kappa + alpha * d_kappa
y = y + alpha * d_y
return x, y, z, tau, kappa
def _get_blind_start(shape):
"""
Return the starting point from [4] 4.4
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
m, n = shape
x0 = np.ones(n)
y0 = np.zeros(m)
z0 = np.ones(n)
tau0 = 1
kappa0 = 1
return x0, y0, z0, tau0, kappa0
def _indicators(A, b, c, c0, x, y, z, tau, kappa):
"""
Implementation of several equations from [4] used as indicators of
the status of optimization.
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
# residuals for termination are relative to initial values
x0, y0, z0, tau0, kappa0 = _get_blind_start(A.shape)
# See [4], Section 4 - The Homogeneous Algorithm, Equation 8.8
def r_p(x, tau):
return b * tau - A.dot(x)
def r_d(y, z, tau):
return c * tau - A.T.dot(y) - z
def r_g(x, y, kappa):
return kappa + c.dot(x) - b.dot(y)
# np.dot unpacks if they are arrays of size one
def mu(x, tau, z, kappa):
return (x.dot(z) + np.dot(tau, kappa)) / (len(x) + 1)
obj = c.dot(x / tau) + c0
def norm(a):
return np.linalg.norm(a)
# See [4], Section 4.5 - The Stopping Criteria
r_p0 = r_p(x0, tau0)
r_d0 = r_d(y0, z0, tau0)
r_g0 = r_g(x0, y0, kappa0)
mu_0 = mu(x0, tau0, z0, kappa0)
rho_A = norm(c.T.dot(x) - b.T.dot(y)) / (tau + norm(b.T.dot(y)))
rho_p = norm(r_p(x, tau)) / max(1, norm(r_p0))
rho_d = norm(r_d(y, z, tau)) / max(1, norm(r_d0))
rho_g = norm(r_g(x, y, kappa)) / max(1, norm(r_g0))
rho_mu = mu(x, tau, z, kappa) / mu_0
return rho_p, rho_d, rho_A, rho_g, rho_mu, obj
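# For reference, the residuals computed above are (cf. [4] 4.5):
#     r_p = b*tau - A @ x           (primal residual)
#     r_d = c*tau - A.T @ y - z     (dual residual)
#     r_g = kappa + c @ x - b @ y   (gap residual)
#     mu  = (x @ z + tau*kappa) / (n + 1)
# rho_p, rho_d and rho_g are the norms of these residuals relative to their
# values at the blind start, rho_mu is mu relative to its starting value, and
# rho_A measures the scaled duality gap; the main loop in _ip_hsd terminates
# when rho_p, rho_d and rho_A all drop below ``tol``.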
def _display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj, header=False):
"""
Print indicators of optimization status to the console.
Parameters
----------
rho_p : float
The (normalized) primal feasibility, see [4] 4.5
rho_d : float
The (normalized) dual feasibility, see [4] 4.5
rho_g : float
The (normalized) duality gap, see [4] 4.5
alpha : float
The step size, see [4] 4.3
rho_mu : float
The (normalized) path parameter, see [4] 4.5
obj : float
The objective function value of the current iterate
header : bool
True if a header is to be printed
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
"""
if header:
print("Primal Feasibility ",
"Dual Feasibility ",
"Duality Gap ",
"Step ",
"Path Parameter ",
"Objective ")
    # fixed-width, left-aligned columns with 13 significant digits each
fmt = '{0:<20.13}{1:<20.13}{2:<20.13}{3:<17.13}{4:<20.13}{5:<20.13}'
print(fmt.format(
float(rho_p),
float(rho_d),
float(rho_g),
alpha if isinstance(alpha, str) else float(alpha),
float(rho_mu),
float(obj)))
def _ip_hsd(A, b, c, c0, alpha0, beta, maxiter, disp, tol, sparse, lstsq,
sym_pos, cholesky, pc, ip, permc_spec, callback, postsolve_args):
r"""
Solve a linear programming problem in standard form:
Minimize::
c @ x
Subject to::
A @ x == b
x >= 0
using the interior point method of [4].
Parameters
----------
A : 2-D array
2-D array such that ``A @ x``, gives the values of the equality
constraints at ``x``.
b : 1-D array
1-D array of values representing the RHS of each equality constraint
(row) in ``A`` (for standard form problem).
c : 1-D array
Coefficients of the linear objective function to be minimized (for
standard form problem).
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables. (Purely for display.)
alpha0 : float
        The maximal step size for Mehrotra's predictor-corrector search
        direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
beta : float
The desired reduction of the path parameter :math:`\mu` (see [6]_)
maxiter : int
The maximum number of iterations of the algorithm.
disp : bool
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
tol : float
Termination tolerance; see [4]_ Section 4.5.
sparse : bool
Set to ``True`` if the problem is to be treated as sparse. However,
the inputs ``A_eq`` and ``A_ub`` should nonetheless be provided as
(dense) arrays rather than sparse matrices.
lstsq : bool
Set to ``True`` if the problem is expected to be very poorly
conditioned. This should always be left as ``False`` unless severe
numerical difficulties are frequently encountered, and a better option
would be to improve the formulation of the problem.
sym_pos : bool
Leave ``True`` if the problem is expected to yield a well conditioned
symmetric positive definite normal equation matrix (almost always).
cholesky : bool
Set to ``True`` if the normal equations are to be solved by explicit
Cholesky decomposition followed by explicit forward/backward
substitution. This is typically faster for moderate, dense problems
that are numerically well-behaved.
pc : bool
        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
used. This is almost always (if not always) beneficial.
ip : bool
Set to ``True`` if the improved initial point suggestion due to [4]_
Section 4.3 is desired. It's unclear whether this is beneficial.
permc_spec : str (default = 'MMD_AT_PLUS_A')
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
True``.) A matrix is factorized in each iteration of the algorithm.
This option specifies how to permute the columns of the matrix for
sparsity preservation. Acceptable values are:
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering.
This option can impact the convergence of the
interior point algorithm; test different values to determine which
performs best for your problem. For more information, refer to
``scipy.sparse.linalg.splu``.
callback : callable, optional
If a callback function is provided, it will be called within each
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
Current solution vector
fun : float
Current value of the objective function
success : bool
True only when an algorithm has completed successfully,
so this is always False as the callback function is called
only while the algorithm is still iterating.
slack : 1-D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
the corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
that is, ``b - A_eq @ x``
phase : int
The phase of the algorithm being executed. This is always
1 for the interior-point method because it has only one phase.
status : int
                For the interior-point method, this is always 0 because if a
                different status is detected, the algorithm terminates.
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
postsolve_args : tuple
Data needed by _postsolve to convert the solution to the standard-form
problem into the solution to the original problem.
Returns
-------
x_hat : float
Solution vector (for standard form problem).
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
message : str
A string descriptor of the exit status of the optimization.
iteration : int
The number of iterations taken to solve the problem
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at:
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
"""
iteration = 0
# default initial point
x, y, z, tau, kappa = _get_blind_start(A.shape)
# first iteration is special improvement of initial point
ip = ip if pc else False
# [4] 4.5
rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
A, b, c, c0, x, y, z, tau, kappa)
go = rho_p > tol or rho_d > tol or rho_A > tol # we might get lucky : )
if disp:
_display_iter(rho_p, rho_d, rho_g, "-", rho_mu, obj, header=True)
if callback is not None:
x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
'con': con, 'nit': iteration, 'phase': 1,
'complete': False, 'status': 0,
'message': "", 'success': False})
callback(res)
status = 0
message = "Optimization terminated successfully."
if sparse:
A = sps.csc_matrix(A)
A.T = A.transpose() # A.T is defined for sparse matrices but is slow
# Redefine it to avoid calculating again
# This is fine as long as A doesn't change
while go:
iteration += 1
if ip: # initial point
# [4] Section 4.4
gamma = 1
def eta(g):
return 1
else:
# gamma = 0 in predictor step according to [4] 4.1
# if predictor/corrector is off, use mean of complementarity [6]
# 5.1 / [4] Below Figure 10-4
gamma = 0 if pc else beta * np.mean(z * x)
# [4] Section 4.1
def eta(g=gamma):
return 1 - g
try:
# Solve [4] 8.6 and 8.7/8.13/8.23
d_x, d_y, d_z, d_tau, d_kappa = _get_delta(
A, b, c, x, y, z, tau, kappa, gamma, eta,
sparse, lstsq, sym_pos, cholesky, pc, ip, permc_spec)
if ip: # initial point
# [4] 4.4
                # The formula after 8.23 takes a full step regardless of
                # whether this drives some variables negative
alpha = 1.0
x, y, z, tau, kappa = _do_step(
x, y, z, tau, kappa, d_x, d_y,
d_z, d_tau, d_kappa, alpha)
x[x < 1] = 1
z[z < 1] = 1
tau = max(1, tau)
kappa = max(1, kappa)
ip = False # done with initial point
else:
# [4] Section 4.3
alpha = _get_step(x, d_x, z, d_z, tau,
d_tau, kappa, d_kappa, alpha0)
# [4] Equation 8.9
x, y, z, tau, kappa = _do_step(
x, y, z, tau, kappa, d_x, d_y, d_z, d_tau, d_kappa, alpha)
except (LinAlgError, FloatingPointError,
ValueError, ZeroDivisionError):
# this can happen when sparse solver is used and presolve
# is turned off. Also observed ValueError in AppVeyor Python 3.6
# Win32 build (PR #8676). I've never seen it otherwise.
status = 4
message = _get_message(status)
break
# [4] 4.5
rho_p, rho_d, rho_A, rho_g, rho_mu, obj = _indicators(
A, b, c, c0, x, y, z, tau, kappa)
go = rho_p > tol or rho_d > tol or rho_A > tol
if disp:
_display_iter(rho_p, rho_d, rho_g, alpha, rho_mu, obj)
if callback is not None:
x_o, fun, slack, con = _postsolve(x/tau, postsolve_args)
res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
'con': con, 'nit': iteration, 'phase': 1,
'complete': False, 'status': 0,
'message': "", 'success': False})
callback(res)
# [4] 4.5
inf1 = (rho_p < tol and rho_d < tol and rho_g < tol and tau < tol *
max(1, kappa))
inf2 = rho_mu < tol and tau < tol * min(1, kappa)
if inf1 or inf2:
# [4] Lemma 8.4 / Theorem 8.3
if b.transpose().dot(y) > tol:
status = 2
else: # elif c.T.dot(x) < tol: ? Probably not necessary.
status = 3
message = _get_message(status)
break
elif iteration >= maxiter:
status = 1
message = _get_message(status)
break
x_hat = x / tau
# [4] Statement after Theorem 8.2
return x_hat, status, message, iteration
def _linprog_ip(c, c0, A, b, callback, postsolve_args, maxiter=1000, tol=1e-8,
disp=False, alpha0=.99995, beta=0.1, sparse=False, lstsq=False,
sym_pos=True, cholesky=None, pc=True, ip=False,
permc_spec='MMD_AT_PLUS_A', **unknown_options):
r"""
Minimize a linear objective function subject to linear
equality and non-negativity constraints using the interior point method
of [4]_. Linear programming is intended to solve problems
of the following form:
Minimize::
c @ x
Subject to::
A @ x == b
x >= 0
User-facing documentation is in _linprog_doc.py.
Parameters
----------
c : 1-D array
Coefficients of the linear objective function to be minimized.
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables. (Purely for display.)
A : 2-D array
2-D array such that ``A @ x``, gives the values of the equality
constraints at ``x``.
b : 1-D array
1-D array of values representing the right hand side of each equality
constraint (row) in ``A``.
callback : callable, optional
Callback function to be executed once per iteration.
postsolve_args : tuple
Data needed by _postsolve to convert the solution to the standard-form
problem into the solution to the original problem.
Options
-------
maxiter : int (default = 1000)
The maximum number of iterations of the algorithm.
tol : float (default = 1e-8)
Termination tolerance to be used for all termination criteria;
see [4]_ Section 4.5.
disp : bool (default = False)
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
alpha0 : float (default = 0.99995)
        The maximal step size for Mehrotra's predictor-corrector search
direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
beta : float (default = 0.1)
The desired reduction of the path parameter :math:`\mu` (see [6]_)
        when Mehrotra's predictor-corrector is not in use (uncommon).
sparse : bool (default = False)
Set to ``True`` if the problem is to be treated as sparse after
presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
this option will automatically be set ``True``, and the problem
will be treated as sparse even during presolve. If your constraint
matrices contain mostly zeros and the problem is not very small (less
than about 100 constraints or variables), consider setting ``True``
or providing ``A_eq`` and ``A_ub`` as sparse matrices.
lstsq : bool (default = False)
Set to ``True`` if the problem is expected to be very poorly
conditioned. This should always be left ``False`` unless severe
numerical difficulties are encountered. Leave this at the default
unless you receive a warning message suggesting otherwise.
sym_pos : bool (default = True)
Leave ``True`` if the problem is expected to yield a well conditioned
symmetric positive definite normal equation matrix
(almost always). Leave this at the default unless you receive
a warning message suggesting otherwise.
cholesky : bool (default = True)
Set to ``True`` if the normal equations are to be solved by explicit
Cholesky decomposition followed by explicit forward/backward
substitution. This is typically faster for problems
that are numerically well-behaved.
pc : bool (default = True)
        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
used. This is almost always (if not always) beneficial.
ip : bool (default = False)
Set to ``True`` if the improved initial point suggestion due to [4]_
Section 4.3 is desired. Whether this is beneficial or not
depends on the problem.
permc_spec : str (default = 'MMD_AT_PLUS_A')
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
True``, and no SuiteSparse.)
A matrix is factorized in each iteration of the algorithm.
This option specifies how to permute the columns of the matrix for
sparsity preservation. Acceptable values are:
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering.
This option can impact the convergence of the
interior point algorithm; test different values to determine which
performs best for your problem. For more information, refer to
``scipy.sparse.linalg.splu``.
unknown_options : dict
Optional arguments not used by this particular solver. If
`unknown_options` is non-empty a warning is issued listing all
unused options.
Returns
-------
x : 1-D array
Solution vector.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Serious numerical difficulties encountered
message : str
A string descriptor of the exit status of the optimization.
iteration : int
The number of iterations taken to solve the problem.
Notes
-----
This method implements the algorithm outlined in [4]_ with ideas from [8]_
and a structure inspired by the simpler methods of [6]_.
The primal-dual path following method begins with initial 'guesses' of
the primal and dual variables of the standard form problem and iteratively
attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
problem with a gradually reduced logarithmic barrier term added to the
objective. This particular implementation uses a homogeneous self-dual
formulation, which provides certificates of infeasibility or unboundedness
where applicable.
The default initial point for the primal and dual variables is that
defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
point option ``ip=True``), an alternate (potentially improved) starting
point can be calculated according to the additional recommendations of
[4]_ Section 4.4.
A search direction is calculated using the predictor-corrector method
    (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
(A potential improvement would be to implement the method of multiple
corrections described in [4]_ Section 4.2.) In practice, this is
accomplished by solving the normal equations, [4]_ Section 5.1 Equations
8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
solving the normal equations rather than 8.25 directly is that the
matrices involved are symmetric positive definite, so Cholesky
decomposition can be used rather than the more expensive LU factorization.
With default options, the solver used to perform the factorization depends
on third-party software availability and the conditioning of the problem.
For dense problems, solvers are tried in the following order:
1. ``scipy.linalg.cho_factor``
2. ``scipy.linalg.solve`` with option ``sym_pos=True``
3. ``scipy.linalg.solve`` with option ``sym_pos=False``
4. ``scipy.linalg.lstsq``
For sparse problems:
1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are installed)
2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse are installed)
3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
4. ``scipy.sparse.linalg.lsqr``
If the solver fails for any reason, successively more robust (but slower)
solvers are attempted in the order indicated. Attempting, failing, and
re-starting factorization can be time consuming, so if the problem is
numerically challenging, options can be set to bypass solvers that are
failing. Setting ``cholesky=False`` skips to solver 2,
``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
to solver 4 for both sparse and dense problems.
    Potential improvements for combating issues associated with dense
columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
[10]_ Section 4.1-4.2; the latter also discusses the alleviation of
accuracy issues associated with the substitution approach to free
variables.
After calculating the search direction, the maximum possible step size
that does not activate the non-negativity constraints is calculated, and
the smaller of this step size and unity is applied (as in [4]_ Section
4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
The new point is tested according to the termination conditions of [4]_
Section 4.5. The same tolerance, which can be set using the ``tol`` option,
is used for all checks. (A potential improvement would be to expose
the different tolerances to be set independently.) If optimality,
unboundedness, or infeasibility is detected, the solve procedure
terminates; otherwise it repeats.
The expected problem formulation differs between the top level ``linprog``
module and the method specific solvers. The method specific solvers expect a
problem in standard form:
Minimize::
c @ x
Subject to::
A @ x == b
x >= 0
Whereas the top level ``linprog`` module expects a problem of form:
Minimize::
c @ x
Subject to::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
The original problem contains equality, upper-bound and variable constraints
whereas the method specific solver requires equality constraints and
variable non-negativity.
    The ``linprog`` module converts the original problem to standard form by
converting the simple bounds to upper bound constraints, introducing
non-negative slack variables for inequality constraints, and expressing
unbounded variables as the difference between two non-negative variables.
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point methods
for large scale linear programming. HEC/Universite de Geneve, 1996.
"""
_check_unknown_options(unknown_options)
# These should be warnings, not errors
if (cholesky or cholesky is None) and sparse and not has_cholmod:
if cholesky:
warn("Sparse cholesky is only available with scikit-sparse. "
"Setting `cholesky = False`",
OptimizeWarning, stacklevel=3)
cholesky = False
if sparse and lstsq:
warn("Option combination 'sparse':True and 'lstsq':True "
"is not recommended.",
OptimizeWarning, stacklevel=3)
if lstsq and cholesky:
warn("Invalid option combination 'lstsq':True "
"and 'cholesky':True; option 'cholesky' has no effect when "
"'lstsq' is set True.",
OptimizeWarning, stacklevel=3)
valid_permc_spec = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD')
if permc_spec.upper() not in valid_permc_spec:
warn("Invalid permc_spec option: '" + str(permc_spec) + "'. "
"Acceptable values are 'NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', "
"and 'COLAMD'. Reverting to default.",
OptimizeWarning, stacklevel=3)
permc_spec = 'MMD_AT_PLUS_A'
# This can be an error
if not sym_pos and cholesky:
raise ValueError(
"Invalid option combination 'sym_pos':False "
"and 'cholesky':True: Cholesky decomposition is only possible "
"for symmetric positive definite matrices.")
cholesky = cholesky or (cholesky is None and sym_pos and not lstsq)
x, status, message, iteration = _ip_hsd(A, b, c, c0, alpha0, beta,
maxiter, disp, tol, sparse,
lstsq, sym_pos, cholesky,
pc, ip, permc_spec, callback,
postsolve_args)
return x, status, message, iteration
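# Illustrative sketch of the standard-form conversion described in the Notes
# above (hypothetical numbers; in practice the top-level ``linprog`` module
# performs this step before ``_linprog_ip`` is called). An inequality
# constraint gains one non-negative slack variable per row:
#
#     import numpy as np
#     c_ub = np.array([1.0, 2.0])
#     A_ub = np.array([[1.0, 1.0]])
#     b_ub = np.array([4.0])
#     m, n = A_ub.shape
#     A_std = np.hstack([A_ub, np.eye(m)])          # [A_ub | I] @ [x; s] == b_ub
#     c_std = np.concatenate([c_ub, np.zeros(m)])   # slacks carry zero cost
#
# ``A_std``, ``b_ub`` and ``c_std`` are then in the ``A @ x == b``, ``x >= 0``
# form expected by this solver.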
| bsd-3-clause | b6721458147900c2e44fc11f27eea40f | 39.703901 | 143 | 0.600666 | 3.936048 | false | false | false | false |
scipy/scipy | scipy/fft/_pocketfft/helper.py | 10 | 5725 | from numbers import Number
import operator
import os
import threading
import contextlib
import numpy as np
# good_size is exposed (and used) from this import
from .pypocketfft import good_size
_config = threading.local()
_cpu_count = os.cpu_count()
def _iterable_of_int(x, name=None):
"""Convert ``x`` to an iterable sequence of int
Parameters
----------
x : value, or sequence of values, convertible to int
name : str, optional
Name of the argument being converted, only used in the error message
Returns
-------
y : ``List[int]``
"""
if isinstance(x, Number):
x = (x,)
try:
x = [operator.index(a) for a in x]
except TypeError as e:
name = name or "value"
raise ValueError("{} must be a scalar or iterable of integers"
.format(name)) from e
return x
def _init_nd_shape_and_axes(x, shape, axes):
"""Handles shape and axes arguments for nd transforms"""
noshape = shape is None
noaxes = axes is None
if not noaxes:
axes = _iterable_of_int(axes, 'axes')
axes = [a + x.ndim if a < 0 else a for a in axes]
if any(a >= x.ndim or a < 0 for a in axes):
raise ValueError("axes exceeds dimensionality of input")
if len(set(axes)) != len(axes):
raise ValueError("all axes must be unique")
if not noshape:
shape = _iterable_of_int(shape, 'shape')
if axes and len(axes) != len(shape):
raise ValueError("when given, axes and shape arguments"
" have to be of the same length")
if noaxes:
if len(shape) > x.ndim:
raise ValueError("shape requires more axes than are present")
axes = range(x.ndim - len(shape), x.ndim)
shape = [x.shape[a] if s == -1 else s for s, a in zip(shape, axes)]
elif noaxes:
shape = list(x.shape)
axes = range(x.ndim)
else:
shape = [x.shape[a] for a in axes]
if any(s < 1 for s in shape):
raise ValueError(
"invalid number of data points ({0}) specified".format(shape))
return shape, axes
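# Hypothetical illustration of the handling above: for ``x`` of shape
# (4, 5, 6), ``_init_nd_shape_and_axes(x, shape=None, axes=[-1])`` returns
# ([6], [2]); ``shape=[8], axes=None`` returns ([8], range(2, 3)), i.e. only
# the last axis is transformed, with length 8; and ``shape=None, axes=None``
# falls back to the full shape and all axes.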
def _asfarray(x):
"""
Convert to array with floating or complex dtype.
float16 values are also promoted to float32.
"""
if not hasattr(x, "dtype"):
x = np.asarray(x)
if x.dtype == np.float16:
return np.asarray(x, np.float32)
elif x.dtype.kind not in 'fc':
return np.asarray(x, np.float64)
# Require native byte order
dtype = x.dtype.newbyteorder('=')
# Always align input
copy = not x.flags['ALIGNED']
return np.array(x, dtype=dtype, copy=copy)
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
def _fix_shape(x, shape, axes):
"""Internal auxiliary function for _raw_fft, _raw_fftnd."""
must_copy = False
# Build an nd slice with the dimensions to be read from x
index = [slice(None)]*x.ndim
for n, ax in zip(shape, axes):
if x.shape[ax] >= n:
index[ax] = slice(0, n)
else:
index[ax] = slice(0, x.shape[ax])
must_copy = True
index = tuple(index)
if not must_copy:
return x[index], False
s = list(x.shape)
for n, axis in zip(shape, axes):
s[axis] = n
z = np.zeros(s, x.dtype)
z[index] = x[index]
return z, True
def _fix_shape_1d(x, n, axis):
if n < 1:
raise ValueError(
"invalid number of data points ({0}) specified".format(n))
return _fix_shape(x, (n,), (axis,))
_NORM_MAP = {None: 0, 'backward': 0, 'ortho': 1, 'forward': 2}
def _normalization(norm, forward):
"""Returns the pypocketfft normalization mode from the norm argument"""
try:
inorm = _NORM_MAP[norm]
return inorm if forward else (2 - inorm)
except KeyError:
raise ValueError(
f'Invalid norm value {norm!r}, should '
'be "backward", "ortho" or "forward"') from None
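# Examples of the mapping above (illustrative): ``norm=None`` or ``'backward'``
# gives 0 for a forward transform and 2 for an inverse one; ``'ortho'`` gives 1
# in both directions; ``'forward'`` gives 2 forward and 0 inverse. These
# integer codes are what pypocketfft expects.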
def _workers(workers):
if workers is None:
return getattr(_config, 'default_workers', 1)
if workers < 0:
if workers >= -_cpu_count:
workers += 1 + _cpu_count
else:
raise ValueError("workers value out of range; got {}, must not be"
" less than {}".format(workers, -_cpu_count))
elif workers == 0:
raise ValueError("workers must not be zero")
return workers
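# Illustrative behaviour of the negative-value convention above: on a machine
# where os.cpu_count() == 8, ``workers=-1`` maps to 8 (all CPUs) and
# ``workers=-2`` maps to 7, while ``workers=0`` and values below ``-8`` raise
# ValueError.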
@contextlib.contextmanager
def set_workers(workers):
"""Context manager for the default number of workers used in `scipy.fft`
Parameters
----------
workers : int
The default number of workers to use
Examples
--------
>>> import numpy as np
>>> from scipy import fft, signal
>>> rng = np.random.default_rng()
>>> x = rng.standard_normal((128, 64))
>>> with fft.set_workers(4):
... y = signal.fftconvolve(x, x)
"""
old_workers = get_workers()
_config.default_workers = _workers(operator.index(workers))
try:
yield
finally:
_config.default_workers = old_workers
def get_workers():
"""Returns the default number of workers within the current context
Examples
--------
>>> from scipy import fft
>>> fft.get_workers()
1
>>> with fft.set_workers(4):
... fft.get_workers()
4
"""
return getattr(_config, 'default_workers', 1)
| bsd-3-clause | 073920be125e0f5d5ed992a58021aec2 | 25.50463 | 79 | 0.580611 | 3.776385 | false | false | false | false |
scipy/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_H.py | 25 | 11278 | # -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, arctan2, asarray, cos, exp, arange, pi, sin, sqrt, sum
from .go_benchmark import Benchmark
class Hansen(Benchmark):
r"""
Hansen objective function.
This class defines the Hansen [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
        f_{\text{Hansen}}(x) = \left[ \sum_{i=0}^4 (i+1) \cos(i x_1 + i + 1) \right]
                               \left[ \sum_{j=0}^4 (j+1) \cos((j+2) x_2 + j + 1) \right]
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -176.54179` for
:math:`x = [-7.58989583, -7.70831466]`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO Jamil #61 is missing the starting value of i.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-7.58989583, -7.70831466]]
self.fglob = -176.54179
def fun(self, x, *args):
self.nfev += 1
i = arange(5.)
a = (i + 1) * cos(i * x[0] + i + 1)
b = (i + 1) * cos((i + 2) * x[1] + i + 1)
return sum(a) * sum(b)
class Hartmann3(Benchmark):
r"""
Hartmann3 objective function.
This class defines the Hartmann3 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Hartmann3}}(x) = -\sum\limits_{i=1}^{4} c_i
e^{-\sum\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\begin{array}{l|ccc|c|ccr}
\hline
i & & a_{ij}& & c_i & & p_{ij} & \\
\hline
1 & 3.0 & 10.0 & 30.0 & 1.0 & 0.3689 & 0.1170 & 0.2673 \\
2 & 0.1 & 10.0 & 35.0 & 1.2 & 0.4699 & 0.4387 & 0.7470 \\
3 & 3.0 & 10.0 & 30.0 & 3.0 & 0.1091 & 0.8732 & 0.5547 \\
4 & 0.1 & 10.0 & 35.0 & 3.2 & 0.03815 & 0.5743 & 0.8828 \\
\hline
\end{array}
with :math:`x_i \in [0, 1]` for :math:`i = 1, 2, 3`.
*Global optimum*: :math:`f(x) = -3.8627821478`
for :math:`x = [0.11461292, 0.55564907, 0.85254697]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO Jamil #62 has an incorrect coefficient. p[1, 1] should be 0.4387
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.11461292, 0.55564907, 0.85254697]]
self.fglob = -3.8627821478
self.a = asarray([[3.0, 10., 30.],
[0.1, 10., 35.],
[3.0, 10., 30.],
[0.1, 10., 35.]])
self.p = asarray([[0.3689, 0.1170, 0.2673],
[0.4699, 0.4387, 0.7470],
[0.1091, 0.8732, 0.5547],
[0.03815, 0.5743, 0.8828]])
self.c = asarray([1., 1.2, 3., 3.2])
def fun(self, x, *args):
self.nfev += 1
XX = np.atleast_2d(x)
d = sum(self.a * (XX - self.p) ** 2, axis=1)
return -sum(self.c * exp(-d))
class Hartmann6(Benchmark):
r"""
Hartmann6 objective function.
This class defines the Hartmann6 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Hartmann6}}(x) = -\sum\limits_{i=1}^{4} c_i
e^{-\sum\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\begin{array}{l|cccccc|r}
\hline
i & & & a_{ij} & & & & c_i \\
\hline
1 & 10.0 & 3.0 & 17.0 & 3.50 & 1.70 & 8.00 & 1.0 \\
2 & 0.05 & 10.0 & 17.0 & 0.10 & 8.00 & 14.00 & 1.2 \\
3 & 3.00 & 3.50 & 1.70 & 10.0 & 17.00 & 8.00 & 3.0 \\
4 & 17.00 & 8.00 & 0.05 & 10.00 & 0.10 & 14.00 & 3.2 \\
\hline
\end{array}
\newline
\
\newline
\begin{array}{l|cccccr}
\hline
i & & & p_{ij} & & & \\
\hline
1 & 0.1312 & 0.1696 & 0.5569 & 0.0124 & 0.8283 & 0.5886 \\
2 & 0.2329 & 0.4135 & 0.8307 & 0.3736 & 0.1004 & 0.9991 \\
3 & 0.2348 & 0.1451 & 0.3522 & 0.2883 & 0.3047 & 0.6650 \\
4 & 0.4047 & 0.8828 & 0.8732 & 0.5743 & 0.1091 & 0.0381 \\
\hline
\end{array}
with :math:`x_i \in [0, 1]` for :math:`i = 1, ..., 6`.
*Global optimum*: :math:`f(x_i) = -3.32236801141551` for
:math:`{x} = [0.20168952, 0.15001069, 0.47687398, 0.27533243, 0.31165162,
0.65730054]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0.20168952, 0.15001069, 0.47687398, 0.27533243,
0.31165162, 0.65730054]]
self.fglob = -3.32236801141551
self.a = asarray([[10., 3., 17., 3.5, 1.7, 8.],
[0.05, 10., 17., 0.1, 8., 14.],
[3., 3.5, 1.7, 10., 17., 8.],
[17., 8., 0.05, 10., 0.1, 14.]])
self.p = asarray([[0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
[0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
[0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.665],
[0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381]])
self.c = asarray([1.0, 1.2, 3.0, 3.2])
def fun(self, x, *args):
self.nfev += 1
XX = np.atleast_2d(x)
d = sum(self.a * (XX - self.p) ** 2, axis=1)
return -sum(self.c * exp(-d))
class HelicalValley(Benchmark):
r"""
HelicalValley objective function.
This class defines the HelicalValley [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\text{HelicalValley}}(x) = 100 \left[ (x_3 - 10 \Psi(x_1, x_2))^2
        + \left( \sqrt{x_1^2 + x_2^2} - 1 \right)^2 \right] + x_3^2
    Where, in this exercise:
    .. math::
        2 \pi \Psi(x, y) = \begin{cases} \arctan(y/x) & \textrm{for } x > 0 \\
        \pi + \arctan(y/x) & \textrm{for } x < 0 \end{cases}
with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2, 3`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 0, 0]`
.. [1] Fletcher, R. & Powell, M. A Rapidly Convergent Descent Method for
        Minimization, Computer Journal, 1963, 62, 163-168
TODO: Jamil equation is different to original reference. The above paper
can be obtained from
http://galton.uchicago.edu/~lekheng/courses/302/classics/
fletcher-powell.pdf
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.] * self.N, [10.] * self.N))
self.global_optimum = [[1.0, 0.0, 0.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
r = sqrt(x[0] ** 2 + x[1] ** 2)
theta = 1 / (2. * pi) * arctan2(x[1], x[0])
return x[2] ** 2 + 100 * ((x[2] - 10 * theta) ** 2 + (r - 1) ** 2)
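# Worked check of the optimum quoted above: at x = [1, 0, 0] we have r = 1 and
# theta = arctan2(0, 1) / (2*pi) = 0, so
# f = 0**2 + 100*((0 - 0)**2 + (1 - 1)**2) = 0, matching fglob.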
class HimmelBlau(Benchmark):
r"""
HimmelBlau objective function.
This class defines the HimmelBlau [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{HimmelBlau}}({x}) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2
with :math:`x_i \in [-6, 6]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [3, 2]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.] * self.N, [5.] * self.N))
self.global_optimum = [[3.0, 2.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2
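# Worked check of the optimum quoted above: at x = [3, 2],
# (3**2 + 2 - 11)**2 + (3 + 2**2 - 7)**2 = 0**2 + 0**2 = 0, matching fglob.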
class HolderTable(Benchmark):
r"""
HolderTable objective function.
This class defines the HolderTable [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{HolderTable}}({x}) = - \left|{e^{\left|{1
- \frac{\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi} }\right|}
\sin\left(x_{1}\right) \cos\left(x_{2}\right)}\right|
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -19.20850256788675` for
:math:`x_i = \pm 9.664590028909654` for :math:`i = 1, 2`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil #146 equation is wrong - should be squaring the x1 and x2
terms, but isn't. Gavana does.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [(8.055023472141116, 9.664590028909654),
(-8.055023472141116, 9.664590028909654),
(8.055023472141116, -9.664590028909654),
(-8.055023472141116, -9.664590028909654)]
self.fglob = -19.20850256788675
def fun(self, x, *args):
self.nfev += 1
return -abs(sin(x[0]) * cos(x[1])
* exp(abs(1 - sqrt(x[0] ** 2 + x[1] ** 2) / pi)))
class Hosaki(Benchmark):
r"""
Hosaki objective function.
This class defines the Hosaki [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Hosaki}}(x) = \left ( 1 - 8 x_1 + 7 x_1^2 - \frac{7}{3} x_1^3
        + \frac{1}{4} x_1^4 \right ) x_2^2 e^{-x_2}
with :math:`x_i \in [0, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -2.3458115` for :math:`x = [4, 2]`.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = ([0., 5.], [0., 6.])
self.custom_bounds = [(0, 5), (0, 5)]
self.global_optimum = [[4, 2]]
self.fglob = -2.3458115
def fun(self, x, *args):
self.nfev += 1
val = (1 - 8 * x[0] + 7 * x[0] ** 2 - 7 / 3. * x[0] ** 3
+ 0.25 * x[0] ** 4)
return val * x[1] ** 2 * exp(-x[1])
| bsd-3-clause | bef1d1c7cb03efb108e1673e8085d696 | 29.074667 | 80 | 0.51667 | 2.773051 | false | false | false | false |
scipy/scipy | scipy/signal/tests/mpsig.py | 21 | 3308 | """
Some signal functions implemented using mpmath.
"""
try:
import mpmath
except ImportError:
mpmath = None
def _prod(seq):
"""Returns the product of the elements in the sequence `seq`."""
p = 1
for elem in seq:
p *= elem
return p
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles.
This is simply len(p) - len(z), which must be nonnegative.
A ValueError is raised if len(p) < len(z).
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
return degree
def _zpkbilinear(z, p, k, fs):
"""Bilinear transformation to convert a filter from analog to digital."""
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = [(fs2 + z1) / (fs2 - z1) for z1 in z]
p_z = [(fs2 + p1) / (fs2 - p1) for p1 in p]
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z.extend([-1] * degree)
# Compensate for gain change
numer = _prod(fs2 - z1 for z1 in z)
denom = _prod(fs2 - p1 for p1 in p)
k_z = k * numer / denom
return z_z, p_z, k_z.real
def _zpklp2lp(z, p, k, wo=1):
"""Transform a lowpass filter to a different cutoff frequency."""
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = [wo * z1 for z1 in z]
p_lp = [wo * p1 for p1 in p]
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _butter_analog_poles(n):
"""
Poles of an analog Butterworth lowpass filter.
This is the same calculation as scipy.signal.buttap(n) or
scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
and only the poles are returned.
"""
poles = [-mpmath.exp(1j*mpmath.pi*k/(2*n)) for k in range(-n+1, n, 2)]
return poles
def butter_lp(n, Wn):
"""
Lowpass Butterworth digital filter design.
This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
but it uses mpmath, and the results are returned in lists instead of NumPy
arrays.
"""
zeros = []
poles = _butter_analog_poles(n)
k = 1
fs = 2
warped = 2 * fs * mpmath.tan(mpmath.pi * Wn / fs)
z, p, k = _zpklp2lp(zeros, poles, k, wo=warped)
z, p, k = _zpkbilinear(z, p, k, fs=fs)
return z, p, k
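# Illustrative usage (assumed workflow, requires mpmath): the high-precision
# design can be checked against SciPy's double-precision one, e.g.
#
#     import mpmath
#     mpmath.mp.dps = 50
#     z, p, k = butter_lp(4, mpmath.mpf('0.2'))
#
# which should agree with scipy.signal.butter(4, 0.2, output='zpk') to roughly
# double precision; the extra digits are what make this useful as a reference
# in the tests.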
def zpkfreqz(z, p, k, worN=None):
"""
Frequency response of a filter in zpk format, using mpmath.
This is the same calculation as scipy.signal.freqz, but the input is in
zpk format, the calculation is performed using mpath, and the results are
returned in lists instead of NumPy arrays.
"""
if worN is None or isinstance(worN, int):
N = worN or 512
ws = [mpmath.pi * mpmath.mpf(j) / N for j in range(N)]
else:
ws = worN
h = []
for wk in ws:
zm1 = mpmath.exp(1j * wk)
numer = _prod([zm1 - t for t in z])
denom = _prod([zm1 - t for t in p])
hk = k * numer / denom
h.append(hk)
return ws, h
| bsd-3-clause | 49bcb629d78fa93d2895f012cfdd7e55 | 26.114754 | 78 | 0.604595 | 3.091589 | false | false | false | false |
scipy/scipy | scipy/integrate/tests/test_quadpack.py | 1 | 27947 | import sys
import math
import numpy as np
from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf
from numpy.testing import (assert_,
assert_allclose, assert_array_less, assert_almost_equal)
import pytest
from scipy.integrate import quad, dblquad, tplquad, nquad
from scipy.special import erf, erfc
from scipy._lib._ccallback import LowLevelCallable
import ctypes
import ctypes.util
from scipy._lib._ccallback_c import sine_ctypes
import scipy.integrate._test_multivariate as clib_test
def assert_quad(value_and_err, tabled_value, error_tolerance=1.5e-8):
value, err = value_and_err
assert_allclose(value, tabled_value, atol=err, rtol=0)
if error_tolerance is not None:
assert_array_less(err, error_tolerance)
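# Illustrative use of the helper above (hypothetical): a quad() call returns a
# (value, error-estimate) pair, so
#
#     assert_quad(quad(np.sin, 0, np.pi), 2.0)
#
# checks both that the value lies within the reported error of the tabled
# value and that the reported error itself is below ``error_tolerance``.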
def get_clib_test_routine(name, restype, *argtypes):
ptr = getattr(clib_test, name)
return ctypes.cast(ptr, ctypes.CFUNCTYPE(restype, *argtypes))
class TestCtypesQuad:
def setup_method(self):
if sys.platform == 'win32':
files = ['api-ms-win-crt-math-l1-1-0.dll']
elif sys.platform == 'darwin':
files = ['libm.dylib']
else:
files = ['libm.so', 'libm.so.6']
for file in files:
try:
self.lib = ctypes.CDLL(file)
break
except OSError:
pass
else:
# This test doesn't work on some Linux platforms (Fedora for
# example) that put an ld script in libm.so - see gh-5370
pytest.skip("Ctypes can't import libm.so")
restype = ctypes.c_double
argtypes = (ctypes.c_double,)
for name in ['sin', 'cos', 'tan']:
func = getattr(self.lib, name)
func.restype = restype
func.argtypes = argtypes
def test_typical(self):
assert_quad(quad(self.lib.sin, 0, 5), quad(math.sin, 0, 5)[0])
assert_quad(quad(self.lib.cos, 0, 5), quad(math.cos, 0, 5)[0])
assert_quad(quad(self.lib.tan, 0, 1), quad(math.tan, 0, 1)[0])
def test_ctypes_sine(self):
quad(LowLevelCallable(sine_ctypes), 0, 1)
def test_ctypes_variants(self):
sin_0 = get_clib_test_routine('_sin_0', ctypes.c_double,
ctypes.c_double, ctypes.c_void_p)
sin_1 = get_clib_test_routine('_sin_1', ctypes.c_double,
ctypes.c_int, ctypes.POINTER(ctypes.c_double),
ctypes.c_void_p)
sin_2 = get_clib_test_routine('_sin_2', ctypes.c_double,
ctypes.c_double)
sin_3 = get_clib_test_routine('_sin_3', ctypes.c_double,
ctypes.c_int, ctypes.POINTER(ctypes.c_double))
sin_4 = get_clib_test_routine('_sin_3', ctypes.c_double,
ctypes.c_int, ctypes.c_double)
all_sigs = [sin_0, sin_1, sin_2, sin_3, sin_4]
legacy_sigs = [sin_2, sin_4]
legacy_only_sigs = [sin_4]
# LowLevelCallables work for new signatures
for j, func in enumerate(all_sigs):
callback = LowLevelCallable(func)
if func in legacy_only_sigs:
pytest.raises(ValueError, quad, callback, 0, pi)
else:
assert_allclose(quad(callback, 0, pi)[0], 2.0)
# Plain ctypes items work only for legacy signatures
for j, func in enumerate(legacy_sigs):
if func in legacy_sigs:
assert_allclose(quad(func, 0, pi)[0], 2.0)
else:
pytest.raises(ValueError, quad, func, 0, pi)
class TestMultivariateCtypesQuad:
def setup_method(self):
restype = ctypes.c_double
argtypes = (ctypes.c_int, ctypes.c_double)
for name in ['_multivariate_typical', '_multivariate_indefinite',
'_multivariate_sin']:
func = get_clib_test_routine(name, restype, *argtypes)
setattr(self, name, func)
def test_typical(self):
# 1) Typical function with two extra arguments:
assert_quad(quad(self._multivariate_typical, 0, pi, (2, 1.8)),
0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
assert_quad(quad(self._multivariate_indefinite, 0, Inf),
0.577215664901532860606512)
def test_threadsafety(self):
# Ensure multivariate ctypes are threadsafe
def threadsafety(y):
return y + quad(self._multivariate_sin, 0, 1)[0]
assert_quad(quad(threadsafety, 0, 1), 0.9596976941318602)
class TestQuad:
def test_typical(self):
# 1) Typical function with two extra arguments:
def myfunc(x, n, z): # Bessel function integrand
return cos(n*x-z*sin(x))/pi
assert_quad(quad(myfunc, 0, pi, (2, 1.8)), 0.30614353532540296487)
def test_indefinite(self):
# 2) Infinite integration limits --- Euler's constant
def myfunc(x): # Euler's constant integrand
return -exp(-x)*log(x)
assert_quad(quad(myfunc, 0, Inf), 0.577215664901532860606512)
def test_singular(self):
# 3) Singular points in region of integration.
def myfunc(x):
if 0 < x < 2.5:
return sin(x)
elif 2.5 <= x <= 5.0:
return exp(-x)
else:
return 0.0
assert_quad(quad(myfunc, 0, 10, points=[2.5, 5.0]),
1 - cos(2.5) + exp(-2.5) - exp(-5.0))
def test_sine_weighted_finite(self):
# 4) Sine weighted integral (finite limits)
def myfunc(x, a):
return exp(a*(x-1))
ome = 2.0**3.4
assert_quad(quad(myfunc, 0, 1, args=20, weight='sin', wvar=ome),
(20*sin(ome)-ome*cos(ome)+ome*exp(-20))/(20**2 + ome**2))
def test_sine_weighted_infinite(self):
# 5) Sine weighted integral (infinite limits)
def myfunc(x, a):
return exp(-x*a)
a = 4.0
ome = 3.0
assert_quad(quad(myfunc, 0, Inf, args=a, weight='sin', wvar=ome),
ome/(a**2 + ome**2))
def test_cosine_weighted_infinite(self):
# 6) Cosine weighted integral (negative infinite limits)
def myfunc(x, a):
return exp(x*a)
a = 2.5
ome = 2.3
assert_quad(quad(myfunc, -Inf, 0, args=a, weight='cos', wvar=ome),
a/(a**2 + ome**2))
def test_algebraic_log_weight(self):
# 6) Algebraic-logarithmic weight.
def myfunc(x, a):
return 1/(1+x+2**(-a))
a = 1.5
assert_quad(quad(myfunc, -1, 1, args=a, weight='alg',
wvar=(-0.5, -0.5)),
pi/sqrt((1+2**(-a))**2 - 1))
def test_cauchypv_weight(self):
        # 7) Cauchy principal value weighting w(x) = 1/(x-c)
def myfunc(x, a):
return 2.0**(-a)/((x-1)**2+4.0**(-a))
a = 0.4
tabledValue = ((2.0**(-0.4)*log(1.5) -
2.0**(-1.4)*log((4.0**(-a)+16) / (4.0**(-a)+1)) -
arctan(2.0**(a+2)) -
arctan(2.0**a)) /
(4.0**(-a) + 1))
assert_quad(quad(myfunc, 0, 5, args=0.4, weight='cauchy', wvar=2.0),
tabledValue, error_tolerance=1.9e-8)
def test_b_less_than_a(self):
def f(x, p, q):
return p * np.exp(-q*x)
val_1, err_1 = quad(f, 0, np.inf, args=(2, 3))
val_2, err_2 = quad(f, np.inf, 0, args=(2, 3))
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
def test_b_less_than_a_2(self):
def f(x, s):
return np.exp(-x**2 / 2 / s) / np.sqrt(2.*s)
val_1, err_1 = quad(f, -np.inf, np.inf, args=(2,))
val_2, err_2 = quad(f, np.inf, -np.inf, args=(2,))
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
def test_b_less_than_a_3(self):
def f(x):
return 1.0
val_1, err_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0))
val_2, err_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0))
assert_allclose(val_1, -val_2, atol=max(err_1, err_2))
def test_b_less_than_a_full_output(self):
def f(x):
return 1.0
res_1 = quad(f, 0, 1, weight='alg', wvar=(0, 0), full_output=True)
res_2 = quad(f, 1, 0, weight='alg', wvar=(0, 0), full_output=True)
err = max(res_1[1], res_2[1])
assert_allclose(res_1[0], -res_2[0], atol=err)
def test_double_integral(self):
# 8) Double Integral test
def simpfunc(y, x): # Note order of arguments.
return x+y
a, b = 1.0, 2.0
assert_quad(dblquad(simpfunc, a, b, lambda x: x, lambda x: 2*x),
5/6.0 * (b**3.0-a**3.0))
def test_double_integral2(self):
def func(x0, x1, t0, t1):
return x0 + x1 + t0 + t1
g = lambda x: x
h = lambda x: 2 * x
args = 1, 2
assert_quad(dblquad(func, 1, 2, g, h, args=args),35./6 + 9*.5)
def test_double_integral3(self):
def func(x0, x1):
return x0 + x1 + 1 + 2
assert_quad(dblquad(func, 1, 2, 1, 2),6.)
@pytest.mark.parametrize(
"x_lower, x_upper, y_lower, y_upper, expected",
[
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-inf, 0] for all n.
(-np.inf, 0, -np.inf, 0, np.pi / 4),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-inf, -1] for each n (one at a time).
(-np.inf, -1, -np.inf, 0, np.pi / 4 * erfc(1)),
(-np.inf, 0, -np.inf, -1, np.pi / 4 * erfc(1)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-inf, -1] for all n.
(-np.inf, -1, -np.inf, -1, np.pi / 4 * (erfc(1) ** 2)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-inf, 1] for each n (one at a time).
(-np.inf, 1, -np.inf, 0, np.pi / 4 * (erf(1) + 1)),
(-np.inf, 0, -np.inf, 1, np.pi / 4 * (erf(1) + 1)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-inf, 1] for all n.
(-np.inf, 1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) ** 2)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain Dx = [-inf, -1] and Dy = [-inf, 1].
(-np.inf, -1, -np.inf, 1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain Dx = [-inf, 1] and Dy = [-inf, -1].
(-np.inf, 1, -np.inf, -1, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [0, inf] for all n.
(0, np.inf, 0, np.inf, np.pi / 4),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [1, inf] for each n (one at a time).
(1, np.inf, 0, np.inf, np.pi / 4 * erfc(1)),
(0, np.inf, 1, np.inf, np.pi / 4 * erfc(1)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [1, inf] for all n.
(1, np.inf, 1, np.inf, np.pi / 4 * (erfc(1) ** 2)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-1, inf] for each n (one at a time).
(-1, np.inf, 0, np.inf, np.pi / 4 * (erf(1) + 1)),
(0, np.inf, -1, np.inf, np.pi / 4 * (erf(1) + 1)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-1, inf] for all n.
(-1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) ** 2)),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain Dx = [-1, inf] and Dy = [1, inf].
(-1, np.inf, 1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain Dx = [1, inf] and Dy = [-1, inf].
(1, np.inf, -1, np.inf, np.pi / 4 * ((erf(1) + 1) * erfc(1))),
# Multiple integration of a function in n = 2 variables: f(x, y, z)
# over domain D = [-inf, inf] for all n.
(-np.inf, np.inf, -np.inf, np.inf, np.pi)
]
)
def test_double_integral_improper(
self, x_lower, x_upper, y_lower, y_upper, expected
):
# The Gaussian Integral.
def f(x, y):
return np.exp(-x ** 2 - y ** 2)
assert_quad(
dblquad(f, x_lower, x_upper, y_lower, y_upper),
expected,
error_tolerance=3e-8
)
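    # The expected values in the parametrization above follow from
    # separability: dblquad of exp(-x**2 - y**2) factors into two 1-D
    # Gaussian integrals, and for one factor
    #     int_{-inf}^{0}  exp(-x**2) dx = sqrt(pi) / 2
    #     int_{-inf}^{-1} exp(-x**2) dx = sqrt(pi) / 2 * erfc(1)
    #     int_{-inf}^{1}  exp(-x**2) dx = sqrt(pi) / 2 * (erf(1) + 1)
    # so, for example, the [-inf, 0] x [-inf, 0] case gives pi / 4.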
def test_triple_integral(self):
# 9) Triple Integral test
def simpfunc(z, y, x, t): # Note order of arguments.
return (x+y+z)*t
a, b = 1.0, 2.0
assert_quad(tplquad(simpfunc, a, b,
lambda x: x, lambda x: 2*x,
lambda x, y: x - y, lambda x, y: x + y,
(2.,)),
2*8/3.0 * (b**4.0 - a**4.0))
@pytest.mark.parametrize(
"x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, expected",
[
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 0] for all n.
(-np.inf, 0, -np.inf, 0, -np.inf, 0, (np.pi ** (3 / 2)) / 8),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, -1] for each n (one at a time).
(-np.inf, -1, -np.inf, 0, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(-np.inf, 0, -np.inf, -1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(-np.inf, 0, -np.inf, 0, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, -1] for each n (two at a time).
(-np.inf, -1, -np.inf, -1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(-np.inf, -1, -np.inf, 0, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(-np.inf, 0, -np.inf, -1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, -1] for all n.
(-np.inf, -1, -np.inf, -1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [-inf, -1] and Dy = Dz = [-inf, 1].
(-np.inf, -1, -np.inf, 1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [-inf, -1] and Dz = [-inf, 1].
(-np.inf, -1, -np.inf, -1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [-inf, -1] and Dy = [-inf, 1].
(-np.inf, -1, -np.inf, 1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [-inf, 1] and Dy = Dz = [-inf, -1].
(-np.inf, 1, -np.inf, -1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [-inf, 1] and Dz = [-inf, -1].
(-np.inf, 1, -np.inf, 1, -np.inf, -1,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [-inf, 1] and Dy = [-inf, -1].
(-np.inf, 1, -np.inf, -1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 1] for each n (one at a time).
(-np.inf, 1, -np.inf, 0, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(-np.inf, 0, -np.inf, 1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(-np.inf, 0, -np.inf, 0, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 1] for each n (two at a time).
(-np.inf, 1, -np.inf, 1, -np.inf, 0,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(-np.inf, 1, -np.inf, 0, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(-np.inf, 0, -np.inf, 1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, 1] for all n.
(-np.inf, 1, -np.inf, 1, -np.inf, 1,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [0, inf] for all n.
(0, np.inf, 0, np.inf, 0, np.inf, (np.pi ** (3 / 2)) / 8),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [1, inf] for each n (one at a time).
(1, np.inf, 0, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(0, np.inf, 1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
(0, np.inf, 0, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * erfc(1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [1, inf] for each n (two at a time).
(1, np.inf, 1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(1, np.inf, 0, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
(0, np.inf, 1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [1, inf] for all n.
(1, np.inf, 1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erfc(1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-1, inf] for each n (one at a time).
(-1, np.inf, 0, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(0, np.inf, -1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
(0, np.inf, 0, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * (erf(1) + 1)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-1, inf] for each n (two at a time).
(-1, np.inf, -1, np.inf, 0, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(-1, np.inf, 0, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
(0, np.inf, -1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 2)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-1, inf] for all n.
(-1, np.inf, -1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) ** 3)),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [1, inf] and Dy = Dz = [-1, inf].
(1, np.inf, -1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [1, inf] and Dz = [-1, inf].
(1, np.inf, 1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [1, inf] and Dy = [-1, inf].
(1, np.inf, -1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = [-1, inf] and Dy = Dz = [1, inf].
(-1, np.inf, 1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * ((erf(1) + 1) * (erfc(1) ** 2))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dy = [-1, inf] and Dz = [1, inf].
(-1, np.inf, -1, np.inf, 1, np.inf,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain Dx = Dz = [-1, inf] and Dy = [1, inf].
(-1, np.inf, 1, np.inf, -1, np.inf,
(np.pi ** (3 / 2)) / 8 * (((erf(1) + 1) ** 2) * erfc(1))),
# Multiple integration of a function in n = 3 variables: f(x, y, z)
# over domain D = [-inf, inf] for all n.
(-np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf,
np.pi ** (3 / 2)),
],
)
def test_triple_integral_improper(
self,
x_lower,
x_upper,
y_lower,
y_upper,
z_lower,
z_upper,
expected
):
# The Gaussian Integral.
def f(x, y, z):
return np.exp(-x ** 2 - y ** 2 - z ** 2)
assert_quad(
tplquad(f, x_lower, x_upper, y_lower, y_upper, z_lower, z_upper),
expected,
error_tolerance=6e-8
)
def test_complex(self):
def tfunc(x):
return np.exp(1j*x)
assert np.allclose(
quad(tfunc, 0, np.pi/2, complex_func=True)[0],
1+1j)
# We consider a divergent case in order to force quadpack
# to return an error message. The output is compared
# against what is returned by explicit integration
# of the parts.
kwargs = {'a': 0, 'b': np.inf, 'full_output': True,
'weight': 'cos', 'wvar': 1}
res_c = quad(tfunc, complex_func=True, **kwargs)
res_r = quad(lambda x: np.real(np.exp(1j*x)),
complex_func=False,
**kwargs)
res_i = quad(lambda x: np.imag(np.exp(1j*x)),
complex_func=False,
**kwargs)
np.testing.assert_equal(res_c[0], res_r[0] + 1j*res_i[0])
np.testing.assert_equal(res_c[1], res_r[1] + 1j*res_i[1])
assert len(res_c[2]['real']) == len(res_r[2:]) == 3
assert res_c[2]['real'][2] == res_r[4]
assert res_c[2]['real'][1] == res_r[3]
assert res_c[2]['real'][0]['lst'] == res_r[2]['lst']
assert len(res_c[2]['imag']) == len(res_i[2:]) == 1
assert res_c[2]['imag'][0]['lst'] == res_i[2]['lst']
class TestNQuad:
def test_fixed_limits(self):
def func1(x0, x1, x2, x3):
val = (x0**2 + x1*x2 - x3**3 + np.sin(x0) +
(1 if (x0 - 0.2*x3 - 0.5 - 0.25*x1 > 0) else 0))
return val
def opts_basic(*args):
return {'points': [0.2*args[2] + 0.5 + 0.25*args[0]]}
res = nquad(func1, [[0, 1], [-1, 1], [.13, .8], [-.15, 1]],
opts=[opts_basic, {}, {}, {}], full_output=True)
assert_quad(res[:-1], 1.5267454070738635)
assert_(res[-1]['neval'] > 0 and res[-1]['neval'] < 4e5)
def test_variable_limits(self):
scale = .1
def func2(x0, x1, x2, x3, t0, t1):
val = (x0*x1*x3**2 + np.sin(x2) + 1 +
(1 if x0 + t1*x1 - t0 > 0 else 0))
return val
def lim0(x1, x2, x3, t0, t1):
return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
def lim1(x2, x3, t0, t1):
return [scale * (t0*x2 + t1*x3) - 1,
scale * (t0*x2 + t1*x3) + 1]
def lim2(x3, t0, t1):
return [scale * (x3 + t0**2*t1**3) - 1,
scale * (x3 + t0**2*t1**3) + 1]
def lim3(t0, t1):
return [scale * (t0 + t1) - 1, scale * (t0 + t1) + 1]
def opts0(x1, x2, x3, t0, t1):
return {'points': [t0 - t1*x1]}
def opts1(x2, x3, t0, t1):
return {}
def opts2(x3, t0, t1):
return {}
def opts3(t0, t1):
return {}
res = nquad(func2, [lim0, lim1, lim2, lim3], args=(0, 0),
opts=[opts0, opts1, opts2, opts3])
assert_quad(res, 25.066666666666663)
def test_square_separate_ranges_and_opts(self):
def f(y, x):
return 1.0
assert_quad(nquad(f, [[-1, 1], [-1, 1]], opts=[{}, {}]), 4.0)
def test_square_aliased_ranges_and_opts(self):
def f(y, x):
return 1.0
r = [-1, 1]
opt = {}
assert_quad(nquad(f, [r, r], opts=[opt, opt]), 4.0)
def test_square_separate_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range0(*args):
return (-1, 1)
def fn_range1(*args):
return (-1, 1)
def fn_opt0(*args):
return {}
def fn_opt1(*args):
return {}
ranges = [fn_range0, fn_range1]
opts = [fn_opt0, fn_opt1]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_square_aliased_fn_ranges_and_opts(self):
def f(y, x):
return 1.0
def fn_range(*args):
return (-1, 1)
def fn_opt(*args):
return {}
ranges = [fn_range, fn_range]
opts = [fn_opt, fn_opt]
assert_quad(nquad(f, ranges, opts=opts), 4.0)
def test_matching_quad(self):
def func(x):
return x**2 + 1
res, reserr = quad(func, 0, 4)
res2, reserr2 = nquad(func, ranges=[[0, 4]])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_dblquad(self):
def func2d(x0, x1):
return x0**2 + x1**3 - x0 * x1 + 1
res, reserr = dblquad(func2d, -2, 2, lambda x: -3, lambda x: 3)
res2, reserr2 = nquad(func2d, [[-3, 3], (-2, 2)])
assert_almost_equal(res, res2)
assert_almost_equal(reserr, reserr2)
def test_matching_tplquad(self):
def func3d(x0, x1, x2, c0, c1):
return x0**2 + c0 * x1**3 - x0 * x1 + 1 + c1 * np.sin(x2)
res = tplquad(func3d, -1, 2, lambda x: -2, lambda x: 2,
lambda x, y: -np.pi, lambda x, y: np.pi,
args=(2, 3))
res2 = nquad(func3d, [[-np.pi, np.pi], [-2, 2], (-1, 2)], args=(2, 3))
assert_almost_equal(res, res2)
def test_dict_as_opts(self):
try:
nquad(lambda x, y: x * y, [[0, 1], [0, 1]], opts={'epsrel': 0.0001})
except TypeError:
assert False
| bsd-3-clause | cf70ba4550126005f4c6b93e7a4f19d8 | 40.402963 | 84 | 0.465703 | 2.939933 | false | true | false | false |
scipy/scipy | scipy/stats/_ksstats.py | 9 | 20086 | # Compute the two-sided one-sample Kolmogorov-Smirnov Prob(Dn <= d) where:
# D_n = sup_x{|F_n(x) - F(x)|},
# F_n(x) is the empirical CDF for a sample of size n {x_i: i=1,...,n},
# F(x) is the CDF of a probability distribution.
#
# Exact methods:
# Prob(D_n >= d) can be computed via a matrix algorithm of Durbin[1]
# or a recursion algorithm due to Pomeranz[2].
# Marsaglia, Tsang & Wang[3] gave a computationally efficient way to perform
# the Durbin algorithm.
# D_n >= d <==> D_n+ >= d or D_n- >= d (the one-sided K-S statistics), hence
# Prob(D_n >= d) = 2*Prob(D_n+ >= d) - Prob(D_n+ >= d and D_n- >= d).
# For d > 0.5, the latter intersection probability is 0.
#
# Approximate methods:
# For d close to 0.5, ignoring that intersection term may still give a
# reasonable approximation.
# Li-Chien[4] and Korolyuk[5] gave an asymptotic formula extending
# Kolmogorov's initial asymptotic, suitable for large d. (See
# scipy.special.kolmogorov for that asymptotic)
# Pelz-Good[6] used the functional equation for Jacobi theta functions to
# transform the Li-Chien/Korolyuk formula to produce a computational formula
# suitable for small d.
#
# Simard and L'Ecuyer[7] provided an algorithm to decide when to use each of
# the above approaches and it is that which is used here.
#
# Other approaches:
# Carvalho[8] optimizes Durbin's matrix algorithm for large values of d.
# Moscovich and Nadler[9] use FFTs to compute the convolutions.
# References:
# [1] Durbin J (1968).
# "The Probability that the Sample Distribution Function Lies Between Two
# Parallel Straight Lines."
# Annals of Mathematical Statistics, 39, 398-411.
# [2] Pomeranz J (1974).
# "Exact Cumulative Distribution of the Kolmogorov-Smirnov Statistic for
# Small Samples (Algorithm 487)."
# Communications of the ACM, 17(12), 703-704.
# [3] Marsaglia G, Tsang WW, Wang J (2003).
# "Evaluating Kolmogorov's Distribution."
# Journal of Statistical Software, 8(18), 1-4.
# [4] LI-CHIEN, C. (1956).
# "On the exact distribution of the statistics of A. N. Kolmogorov and
# their asymptotic expansion."
# Acta Matematica Sinica, 6, 55-81.
# [5] KOROLYUK, V. S. (1960).
# "Asymptotic analysis of the distribution of the maximum deviation in
# the Bernoulli scheme."
# Theor. Probability Appl., 4, 339-366.
# [6] Pelz W, Good IJ (1976).
# "Approximating the Lower Tail-areas of the Kolmogorov-Smirnov One-sample
# Statistic."
# Journal of the Royal Statistical Society, Series B, 38(2), 152-156.
# [7] Simard, R., L'Ecuyer, P. (2011)
# "Computing the Two-Sided Kolmogorov-Smirnov Distribution",
# Journal of Statistical Software, Vol 39, 11, 1-18.
# [8] Carvalho, Luis (2015)
# "An Improved Evaluation of Kolmogorov's Distribution"
# Journal of Statistical Software, Code Snippets; Vol 65(3), 1-8.
# [9] Amit Moscovich, Boaz Nadler (2017)
# "Fast calculation of boundary crossing probabilities for Poisson
# processes",
# Statistics & Probability Letters, Vol 123, 177-182.
import numpy as np
import scipy.special
import scipy.special._ufuncs as scu
from scipy._lib._finite_differences import _derivative
_E128 = 128
_EP128 = np.ldexp(np.longdouble(1), _E128)
_EM128 = np.ldexp(np.longdouble(1), -_E128)
_SQRT2PI = np.sqrt(2 * np.pi)
_LOG_2PI = np.log(2 * np.pi)
_MIN_LOG = -708
_SQRT3 = np.sqrt(3)
_PI_SQUARED = np.pi ** 2
_PI_FOUR = np.pi ** 4
_PI_SIX = np.pi ** 6
# [Lifted from _loggamma.pxd.] If B_m are the Bernoulli numbers,
# then Stirling coeffs are B_{2j}/(2j)/(2j-1) for j=8,...1.
_STIRLING_COEFFS = [-2.955065359477124183e-2, 6.4102564102564102564e-3,
-1.9175269175269175269e-3, 8.4175084175084175084e-4,
-5.952380952380952381e-4, 7.9365079365079365079e-4,
-2.7777777777777777778e-3, 8.3333333333333333333e-2]
def _log_nfactorial_div_n_pow_n(n):
# Computes n! / n**n
# = (n-1)! / n**(n-1)
# Uses Stirling's approximation, but removes n*log(n) up-front to
# avoid subtractive cancellation.
# = log(n)/2 - n + log(sqrt(2pi)) + sum B_{2j}/(2j)/(2j-1)/n**(2j-1)
rn = 1.0/n
return np.log(n)/2 - n + _LOG_2PI/2 + rn * np.polyval(_STIRLING_COEFFS, rn/n)
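# Illustrative sanity check for the helper above (not used by the code):
# for n = 3, n!/n**n = 6/27 ~= 0.2222, so the function should return roughly
# log(0.2222) ~= -1.504; the Stirling-based value agrees closely with this.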
def _clip_prob(p):
"""clips a probability to range 0<=p<=1."""
return np.clip(p, 0.0, 1.0)
def _select_and_clip_prob(cdfprob, sfprob, cdf=True):
"""Selects either the CDF or SF, and then clips to range 0<=p<=1."""
p = np.where(cdf, cdfprob, sfprob)
return _clip_prob(p)
def _kolmogn_DMTW(n, d, cdf=True):
r"""Computes the Kolmogorov CDF: Pr(D_n <= d) using the MTW approach to
the Durbin matrix algorithm.
Durbin (1968); Marsaglia, Tsang, Wang (2003). [1], [3].
"""
# Write d = (k-h)/n, where k is positive integer and 0 <= h < 1
# Generate initial matrix H of size m*m where m=(2k-1)
# Compute k-th row of (n!/n^n) * H^n, scaling intermediate results.
# Requires memory O(m^2) and computation O(m^2 log(n)).
# Most suitable for small m.
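# Worked example of the decomposition (illustrative numbers only): for n = 10
# and d = 0.274, nd = 2.74, so k = ceil(2.74) = 3, h = 3 - 2.74 = 0.26 and the
# matrix size is m = 2*k - 1 = 5.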
if d >= 1.0:
return _select_and_clip_prob(1.0, 0.0, cdf)
nd = n * d
if nd <= 0.5:
return _select_and_clip_prob(0.0, 1.0, cdf)
k = int(np.ceil(nd))
h = k - nd
m = 2 * k - 1
H = np.zeros([m, m])
# Initialize: v is first column (and last row) of H
# v[j] = (1 - h^(j+1))/(j+1)!  (except for v[-1])
# w[j] = 1/j!
# q = k-th row of H (actually i!/n^i*H^i)
intm = np.arange(1, m + 1)
v = 1.0 - h ** intm
w = np.empty(m)
fac = 1.0
for j in intm:
w[j - 1] = fac
fac /= j # This might underflow. Isn't a problem.
v[j - 1] *= fac
tt = max(2 * h - 1.0, 0)**m - 2*h**m
v[-1] = (1.0 + tt) * fac
for i in range(1, m):
H[i - 1:, i] = w[:m - i + 1]
H[:, 0] = v
H[-1, :] = np.flip(v, axis=0)
Hpwr = np.eye(np.shape(H)[0]) # Holds intermediate powers of H
nn = n
expnt = 0 # Scaling of Hpwr
Hexpnt = 0 # Scaling of H
while nn > 0:
if nn % 2:
Hpwr = np.matmul(Hpwr, H)
expnt += Hexpnt
H = np.matmul(H, H)
Hexpnt *= 2
# Scale as needed.
if np.abs(H[k - 1, k - 1]) > _EP128:
H /= _EP128
Hexpnt += _E128
nn = nn // 2
p = Hpwr[k - 1, k - 1]
# Multiply by n!/n^n
for i in range(1, n + 1):
p = i * p / n
if np.abs(p) < _EM128:
p *= _EP128
expnt -= _E128
# unscale
if expnt != 0:
p = np.ldexp(p, expnt)
return _select_and_clip_prob(p, 1.0-p, cdf)
def _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf):
"""Compute the endpoints of the interval for row i."""
if i == 0:
j1, j2 = -ll - ceilf - 1, ll + ceilf - 1
else:
# i + 1 = 2*ip1div2 + ip1mod2
ip1div2, ip1mod2 = divmod(i + 1, 2)
if ip1mod2 == 0: # i is odd
if ip1div2 == n + 1:
j1, j2 = n - ll - ceilf - 1, n + ll + ceilf - 1
else:
j1, j2 = ip1div2 - 1 - ll - roundf - 1, ip1div2 + ll - 1 + ceilf - 1
else:
j1, j2 = ip1div2 - 1 - ll - 1, ip1div2 + ll + roundf - 1
return max(j1 + 2, 0), min(j2, n)
def _kolmogn_Pomeranz(n, x, cdf=True):
r"""Computes Pr(D_n <= d) using the Pomeranz recursion algorithm.
Pomeranz (1974) [2]
"""
# V is n*(2n+2) matrix.
# Each row is convolution of the previous row and probabilities from a
# Poisson distribution.
# Desired CDF probability is n! V[n-1, 2n+1] (final entry in final row).
# Only two rows are needed at any given stage:
# - Call them V0 and V1.
# - Swap each iteration
# Only a few (contiguous) entries in each row can be non-zero.
# - Keep track of start and end (j1 and j2 below)
# - V0s and V1s track the start in the two rows
# Scale intermediate results as needed.
# Only a few different Poisson distributions can occur
t = n * x
ll = int(np.floor(t))
f = 1.0 * (t - ll) # fractional part of t
g = min(f, 1.0 - f)
ceilf = (1 if f > 0 else 0)
roundf = (1 if f > 0.5 else 0)
npwrs = 2 * (ll + 1) # Maximum number of powers needed in convolutions
gpower = np.empty(npwrs) # gpower = (g/n)^m/m!
twogpower = np.empty(npwrs) # twogpower = (2g/n)^m/m!
onem2gpower = np.empty(npwrs) # onem2gpower = ((1-2g)/n)^m/m!
# gpower etc are *almost* Poisson probs, just missing normalizing factor.
gpower[0] = 1.0
twogpower[0] = 1.0
onem2gpower[0] = 1.0
expnt = 0
g_over_n, two_g_over_n, one_minus_two_g_over_n = g/n, 2*g/n, (1 - 2*g)/n
for m in range(1, npwrs):
gpower[m] = gpower[m - 1] * g_over_n / m
twogpower[m] = twogpower[m - 1] * two_g_over_n / m
onem2gpower[m] = onem2gpower[m - 1] * one_minus_two_g_over_n / m
V0 = np.zeros([npwrs])
V1 = np.zeros([npwrs])
V1[0] = 1 # first row
V0s, V1s = 0, 0 # start indices of the two rows
j1, j2 = _pomeranz_compute_j1j2(0, n, ll, ceilf, roundf)
for i in range(1, 2 * n + 2):
# Preserve j1, V1, V1s, V0s from last iteration
k1 = j1
V0, V1 = V1, V0
V0s, V1s = V1s, V0s
V1.fill(0.0)
j1, j2 = _pomeranz_compute_j1j2(i, n, ll, ceilf, roundf)
if i == 1 or i == 2 * n + 1:
pwrs = gpower
else:
pwrs = (twogpower if i % 2 else onem2gpower)
ln2 = j2 - k1 + 1
if ln2 > 0:
conv = np.convolve(V0[k1 - V0s:k1 - V0s + ln2], pwrs[:ln2])
conv_start = j1 - k1 # First index to use from conv
conv_len = j2 - j1 + 1 # Number of entries to use from conv
V1[:conv_len] = conv[conv_start:conv_start + conv_len]
# Scale to avoid underflow.
if 0 < np.max(V1) < _EM128:
V1 *= _EP128
expnt -= _E128
V1s = V0s + j1 - k1
# multiply by n!
ans = V1[n - V1s]
for m in range(1, n + 1):
if np.abs(ans) > _EP128:
ans *= _EM128
expnt += _E128
ans *= m
# Undo any intermediate scaling
if expnt != 0:
ans = np.ldexp(ans, expnt)
ans = _select_and_clip_prob(ans, 1.0 - ans, cdf)
return ans
def _kolmogn_PelzGood(n, x, cdf=True):
"""Computes the Pelz-Good approximation to Prob(Dn <= x) with 0<=x<=1.
Start with Li-Chien, Korolyuk approximation:
Prob(Dn <= x) ~ K0(z) + K1(z)/sqrt(n) + K2(z)/n + K3(z)/n**1.5
where z = x*sqrt(n).
Transform each K_i(z) using Jacobi theta functions into a form suitable
for small z.
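For orientation (illustrative numbers only): with n = 100 and x = 0.05,
z = 10 * 0.05 = 0.5, and the four terms above are weighted by
1, 1/10, 1/100 and 1/1000 respectively.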
Pelz-Good (1976). [6]
"""
if x <= 0.0:
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
if x >= 1.0:
return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
z = np.sqrt(n) * x
zsquared, zthree, zfour, zsix = z**2, z**3, z**4, z**6
qlog = -_PI_SQUARED / 8 / zsquared
if qlog < _MIN_LOG: # z ~ 0.041743441416853426
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
q = np.exp(qlog)
# Coefficients of terms in the sums for K1, K2 and K3
k1a = -zsquared
k1b = _PI_SQUARED / 4
k2a = 6 * zsix + 2 * zfour
k2b = (2 * zfour - 5 * zsquared) * _PI_SQUARED / 4
k2c = _PI_FOUR * (1 - 2 * zsquared) / 16
k3d = _PI_SIX * (5 - 30 * zsquared) / 64
k3c = _PI_FOUR * (-60 * zsquared + 212 * zfour) / 16
k3b = _PI_SQUARED * (135 * zfour - 96 * zsix) / 4
k3a = -30 * zsix - 90 * z**8
K0to3 = np.zeros(4)
# Use a Horner scheme to evaluate sum c_i q^(i^2)
# Reduces to a sum over odd integers.
maxk = int(np.ceil(16 * z / np.pi))
for k in range(maxk, 0, -1):
m = 2 * k - 1
msquared, mfour, msix = m**2, m**4, m**6
qpower = np.power(q, 8 * k)
coeffs = np.array([1.0,
k1a + k1b*msquared,
k2a + k2b*msquared + k2c*mfour,
k3a + k3b*msquared + k3c*mfour + k3d*msix])
K0to3 *= qpower
K0to3 += coeffs
K0to3 *= q
K0to3 *= _SQRT2PI
# z**10 > 0 as z > 0.04
K0to3 /= np.array([z, 6 * zfour, 72 * z**7, 6480 * z**10])
# Now do the other sum over the other terms, all integers k
# K_2: (pi^2 k^2) q^(k^2),
# K_3: (3pi^2 k^2 z^2 - pi^4 k^4)*q^(k^2)
# Don't expect much subtractive cancellation so use direct calculation
q = np.exp(-_PI_SQUARED / 2 / zsquared)
ks = np.arange(maxk, 0, -1)
ksquared = ks ** 2
sqrt3z = _SQRT3 * z
kspi = np.pi * ks
qpwers = q ** ksquared
k2extra = np.sum(ksquared * qpwers)
k2extra *= _PI_SQUARED * _SQRT2PI/(-36 * zthree)
K0to3[2] += k2extra
k3extra = np.sum((sqrt3z + kspi) * (sqrt3z - kspi) * ksquared * qpwers)
k3extra *= _PI_SQUARED * _SQRT2PI/(216 * zsix)
K0to3[3] += k3extra
powers_of_n = np.power(n * 1.0, np.arange(len(K0to3)) / 2.0)
K0to3 /= powers_of_n
if not cdf:
K0to3 *= -1
K0to3[0] += 1
Ksum = sum(K0to3)
return Ksum
def _kolmogn(n, x, cdf=True):
"""Computes the CDF(or SF) for the two-sided Kolmogorov-Smirnov statistic.
x must be of type float, n of type integer.
Simard & L'Ecuyer (2011) [7].
"""
if np.isnan(n):
return n # Keep the same type of nan
if int(n) != n or n <= 0:
return np.nan
if x >= 1.0:
return _select_and_clip_prob(1.0, 0.0, cdf=cdf)
if x <= 0.0:
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
t = n * x
if t <= 1.0: # Ruben-Gambino: 1/2n <= x <= 1/n
if t <= 0.5:
return _select_and_clip_prob(0.0, 1.0, cdf=cdf)
if n <= 140:
prob = np.prod(np.arange(1, n+1) * (1.0/n) * (2*t - 1))
else:
prob = np.exp(_log_nfactorial_div_n_pow_n(n) + n * np.log(2*t-1))
return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
if t >= n - 1: # Ruben-Gambino
prob = 2 * (1.0 - x)**n
return _select_and_clip_prob(1 - prob, prob, cdf=cdf)
if x >= 0.5: # Exact: 2 * smirnov
prob = 2 * scipy.special.smirnov(n, x)
return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)
nxsquared = t * x
if n <= 140:
if nxsquared <= 0.754693:
prob = _kolmogn_DMTW(n, x, cdf=True)
return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
if nxsquared <= 4:
prob = _kolmogn_Pomeranz(n, x, cdf=True)
return _select_and_clip_prob(prob, 1.0 - prob, cdf=cdf)
# Now use Miller approximation of 2*smirnov
prob = 2 * scipy.special.smirnov(n, x)
return _select_and_clip_prob(1.0 - prob, prob, cdf=cdf)
# Split CDF and SF as they have different cutoffs on nxsquared.
if not cdf:
if nxsquared >= 370.0:
return 0.0
if nxsquared >= 2.2:
prob = 2 * scipy.special.smirnov(n, x)
return _clip_prob(prob)
# Fall through and compute the SF as 1.0-CDF
if nxsquared >= 18.0:
cdfprob = 1.0
elif n <= 100000 and n * x**1.5 <= 1.4:
cdfprob = _kolmogn_DMTW(n, x, cdf=True)
else:
cdfprob = _kolmogn_PelzGood(n, x, cdf=True)
return _select_and_clip_prob(cdfprob, 1.0 - cdfprob, cdf=cdf)
def _kolmogn_p(n, x):
"""Computes the PDF for the two-sided Kolmogorov-Smirnov statistic.
x must be of type float, n of type integer.
"""
if np.isnan(n):
return n # Keep the same type of nan
if int(n) != n or n <= 0:
return np.nan
if x >= 1.0 or x <= 0:
return 0
t = n * x
if t <= 1.0:
# Ruben-Gambino: n!/n^n * (2t-1)^n -> 2 n!/n^n * n^2 * (2t-1)^(n-1)
if t <= 0.5:
return 0.0
if n <= 140:
prd = np.prod(np.arange(1, n) * (1.0 / n) * (2 * t - 1))
else:
prd = np.exp(_log_nfactorial_div_n_pow_n(n) + (n-1) * np.log(2 * t - 1))
return prd * 2 * n**2
if t >= n - 1:
# Ruben-Gambino : 1-2(1-x)**n -> 2n*(1-x)**(n-1)
return 2 * (1.0 - x) ** (n-1) * n
if x >= 0.5:
return 2 * scipy.stats.ksone.pdf(x, n)
# Just take a small delta.
# Ideally x +/- delta would stay within [i/n, (i+1)/n] for some integer i,
# as the CDF is a piecewise degree n polynomial.
# It has knots at 1/n, 2/n, ... (n-1)/n
# and is not a C-infinity function at the knots
delta = x / 2.0**16
delta = min(delta, x - 1.0/n)
delta = min(delta, 0.5 - x)
def _kk(_x):
return kolmogn(n, _x)
return _derivative(_kk, x, dx=delta, order=5)
def _kolmogni(n, p, q):
"""Computes the PPF/ISF of kolmogn.
n of type integer, n>= 1
p is the CDF, q the SF, p+q=1
"""
if np.isnan(n):
return n # Keep the same type of nan
if int(n) != n or n <= 0:
return np.nan
if p <= 0:
return 1.0/n
if q <= 0:
return 1.0
delta = np.exp((np.log(p) - scipy.special.loggamma(n+1))/n)
if delta <= 1.0/n:
return (delta + 1.0 / n) / 2
x = -np.expm1(np.log(q/2.0)/n)
if x >= 1 - 1.0/n:
return x
x1 = scu._kolmogci(p)/np.sqrt(n)
x1 = min(x1, 1.0 - 1.0/n)
_f = lambda x: _kolmogn(n, x) - p
return scipy.optimize.brentq(_f, 1.0/n, x1, xtol=1e-14)
def kolmogn(n, x, cdf=True):
"""Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.
The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
for a sample of size n drawn from a distribution with CDF F(t), where
D_n = sup_t |F_n(t) - F(t)|, and
F_n(t) is the Empirical Cumulative Distribution Function of the sample.
Parameters
----------
n : integer, array_like
the number of samples
x : float, array_like
The K-S statistic, float between 0 and 1
cdf : bool, optional
whether to compute the CDF (default=True) or the SF.
Returns
-------
cdf : ndarray
CDF (or SF if cdf is False) at the specified locations.
The return value has the shape that results from numpy broadcasting `n` and `x`.
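Examples
--------
A minimal usage sketch; the inputs broadcast against each other and the
numeric probabilities are not shown here:
>>> import numpy as np
>>> p = kolmogn(100, 0.06)                        # scalar n and x
>>> ps = kolmogn(100, np.linspace(0.02, 0.2, 5))  # broadcasts over x
>>> ps.shape
(5,)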
"""
it = np.nditer([n, x, cdf, None],
op_dtypes=[None, np.float64, np.bool_, np.float64])
for _n, _x, _cdf, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
result = it.operands[-1]
return result
def kolmognp(n, x):
"""Computes the PDF for the two-sided Kolmogorov-Smirnov distribution.
Parameters
----------
n : integer, array_like
the number of samples
x : float, array_like
The K-S statistic, float between 0 and 1
Returns
-------
pdf : ndarray
The PDF at the specified locations
The return value has the shape that results from numpy broadcasting `n` and `x`.
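Examples
--------
A brief sketch (the numeric values of the density are not shown):
>>> import numpy as np
>>> dens = kolmognp(50, np.array([0.05, 0.10, 0.20]))
>>> dens.shape
(3,)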
"""
it = np.nditer([n, x, None])
for _n, _x, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
z[...] = _kolmogn_p(int(_n), _x)
result = it.operands[-1]
return result
def kolmogni(n, q, cdf=True):
"""Computes the PPF(or ISF) for the two-sided Kolmogorov-Smirnov distribution.
Parameters
----------
n : integer, array_like
the number of samples
q : float, array_like
Probabilities, float between 0 and 1
cdf : bool, optional
whether to compute the PPF (default=True) or the ISF.
Returns
-------
ppf : ndarray
PPF (or ISF if cdf is False) at the specified locations
The return value has the shape that results from numpy broadcasting `n` and `q`.
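Examples
--------
A rough round-trip sketch: applying the PPF to a CDF value should
approximately recover the original statistic (accuracy depends on `n`):
>>> p = kolmogn(50, 0.1)
>>> x = kolmogni(50, p)   # expected to be close to 0.1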
"""
it = np.nditer([n, q, cdf, None])
for _n, _q, _cdf, z in it:
if np.isnan(_n):
z[...] = _n
continue
if int(_n) != _n:
raise ValueError(f'n is not integral: {_n}')
_pcdf, _psf = (_q, 1-_q) if _cdf else (1-_q, _q)
z[...] = _kolmogni(int(_n), _pcdf, _psf)
result = it.operands[-1]
return result
| bsd-3-clause | 264c86af90a6593085c4467eefbc15b4 | 32.701342 | 84 | 0.554267 | 2.672432 | false | false | false | false |
scipy/scipy | scipy/optimize/_nonlin.py | 2 | 49030 | # Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from ._linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov',
'BroydenFirst', 'KrylovJacobian', 'InverseJacobian']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raised if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
https://archive.siam.org/books/kelley/fr16/
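Examples
--------
A minimal sketch (the module-level wrappers such as `broyden1` call this
function internally; the Jacobian choice below is illustrative only):
>>> def F(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
>>> sol = nonlin_solve(F, [0, 0], jacobian='broyden1')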
"""
# Can't use default parameters because it's being explicitly passed as None
# from the calling function, so we need to set it here.
tol_norm = maxnorm if tol_norm is None else tol_norm
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.full_like(x, np.inf)
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in range(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
n, tol_norm(Fx), s))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition:
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with SciPy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian:
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods are useful when implementing trust-region and
similar algorithms, which often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns J^H * v
rsolve : optional
Returns J^-H * v
matmat : optional
Returns J * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
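Examples
--------
A minimal sketch of an object satisfying this interface, built with the
keyword form of the constructor (the fixed ``J = -I`` approximation is
illustrative only); such an object can be passed to `nonlin_solve` as the
`jacobian` argument:
>>> jac = Jacobian(solve=lambda v, tol=0: -v)  # J = -I, so J^-1 v = -v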
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian:
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
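Accepted inputs include (matching the branches below): a `Jacobian`
instance or subclass, a dense ndarray, a sparse matrix, a callable
``x -> J(x)``, or a string name such as ``'broyden1'`` or ``'krylov'``.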
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix:
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
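For example (illustrative): with ``alpha = 2`` and a single stored pair
``(c, d)``, ``matvec(v)`` evaluates to ``2*v + vdot(d, v) * c``.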
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(i.e., rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in range(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
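For example, ``reduction_method=('svd', 3)`` retains at most 3 SVD
components whenever rank reduction is performed (the value 3 here is
illustrative only).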
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='broyden1'`` in particular.
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
Examples
--------
The following functions define a system of nonlinear equations
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.broyden1(fun, [0, 0])
>>> sol
array([0.84116396, 0.15883641])
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
return r
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='broyden2'`` in particular.
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
Examples
--------
The following functions define a system of nonlinear equations
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.broyden2(fun, [0, 0])
>>> sol
array([0.84116365, 0.15883529])
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed from a 'best' solution in the space
spanned by the last `M` vectors. As a result, only an MxM matrix
inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : int, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values are of the order of 0.01.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='anderson'`` in particular.
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
Examples
--------
The following functions define a system of nonlinear equations
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.anderson(fun, [0, 0])
>>> sol
array([0.84116588, 0.15883789])
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in range(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in range(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in range(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in range(n):
for j in range(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in range(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in range(n):
for j in range(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='diagbroyden'`` in particular.
Examples
--------
The following functions define a system of nonlinear equations
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.diagbroyden(fun, [0, 0])
>>> sol
array([0.84116403, 0.15883384])
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='linearmixing'`` in particular.
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(np.full(self.shape[0], -1/self.alpha))
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='excitingmixing'`` in particular.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : str or callable, optional
Krylov method to use to approximate the Jacobian. Can be a string,
or a function implementing the same interface as the iterative
solvers in `scipy.sparse.linalg`. If a string, needs to be one of:
``'lgmres'``, ``'gmres'``, ``'bicgstab'``, ``'cgs'``, ``'minres'``,
``'tfqmr'``.
The default is `scipy.sparse.linalg.lgmres`.
inner_maxiter : int, optional
Parameter to pass to the "inner" Krylov solver: maximum number of
iterations. Iteration will stop after maxiter steps even if the
specified tolerance has not been achieved.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> from scipy.optimize import BroydenFirst, KrylovJacobian
>>> from scipy.optimize import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
inner_kwargs : kwargs
Keyword parameters for the "inner" Krylov solver
(defined with `method`). Parameter names must start with
the `inner_` prefix which will be stripped before passing on
the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method='krylov'`` in particular.
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] C. T. Kelley, Solving Nonlinear Equations with Newton's Method,
SIAM, pp.57-83, 2003.
:doi:`10.1137/1.9780898718898.ch3`
.. [2] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
:doi:`10.1016/j.jcp.2003.08.010`
.. [3] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
:doi:`10.1137/S0895479803422014`
Examples
--------
The following functions define a system of nonlinear equations
>>> def fun(x):
... return [x[0] + 0.5 * x[1] - 1.0,
... 0.5 * (x[1] - x[0]) ** 2]
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.newton_krylov(fun, [0, 0])
>>> sol
array([0.66731771, 0.66536458])
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
# Note that this retrieves one of the named functions, or otherwise
# uses `method` as is (i.e., for a user-provided callable).
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
tfqmr=scipy.sparse.linalg.tfqmr,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restart'] = inner_maxiter
self.method_kw['maxiter'] = 1
self.method_kw.setdefault('atol', 0)
elif self.method in (scipy.sparse.linalg.gcrotmk,
scipy.sparse.linalg.bicgstab,
scipy.sparse.linalg.cgs):
self.method_kw.setdefault('atol', 0)
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
self.method_kw.setdefault('prepend_outer_v', True)
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See e.g., Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
self.method_kw.setdefault('atol', 0)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
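# Scale the finite-difference step: omega = rdiff * max(1, |x|) / max(1, |f|),
# so the perturbation stays roughly relative to the current iterate.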
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
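# Forward-difference approximation of the Jacobian-vector product,
# J v ~= (f(x0 + omega*v/|v|) - f(x0)) * |v| / omega,
# as described in the Notes section of the docstring above.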
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and Jacobian approx.
It inspects the keyword arguments of ``jac.__init__``, and allows the
same arguments to be used in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`.
"""
signature = _getfullargspec(jac.__init__)
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
if kwonlyargs:
raise ValueError('Unexpected signature %s' % signature)
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
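# For illustration only (the exact keyword list is read off each Jacobian
# class at import time, so this is a sketch rather than the generated code
# verbatim), the wrapper produced for ``BroydenFirst`` looks roughly like:
#
#     def broyden1(F, xin, iter=None, alpha=None, reduction_method='restart',
#                  max_rank=None, verbose=False, maxiter=None, f_tol=None,
#                  f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None,
#                  line_search='armijo', callback=None, **kw):
#         jac = BroydenFirst(alpha=alpha, reduction_method=reduction_method,
#                            max_rank=max_rank, **kw)
#         return nonlin_solve(F, xin, jac, iter, verbose, maxiter, f_tol,
#                             f_rtol, x_tol, x_rtol, tol_norm, line_search,
#                             callback)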
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
| bsd-3-clause | ebf11bff8f5ae4e84cbc5a6a9bf272e4 | 30.309068 | 104 | 0.543484 | 3.686466 | false | false | false | false |
scipy/scipy | scipy/sparse/linalg/_isolve/minres.py | 10 | 11425 | from numpy import inner, zeros, inf, finfo
from numpy.linalg import norm
from math import sqrt
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
M=None, callback=None, show=False, check=False):
"""
Use MINimum RESidual iteration to solve Ax=b
MINRES minimizes norm(Ax - b) for a real symmetric matrix A. Unlike
the Conjugate Gradient method, A can be indefinite or singular.
If shift != 0 then the method solves (A - shift*I)x = b
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
The real symmetric N-by-N matrix of the linear system
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : ndarray
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : ndarray
Starting guess for the solution.
shift : float
Value to apply to the system ``(A - shift * I)x = b``. Default is 0.
tol : float
Tolerance to achieve. The algorithm terminates when the relative
residual is below `tol`.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, ndarray, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
show : bool
If ``True``, print out a summary and metrics related to the solution
during iterations. Default is ``False``.
check : bool
If ``True``, run additional input validation to check that `A` and
`M` (if specified) are symmetric. Default is ``False``.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import minres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> A = A + A.T
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = minres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
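As a rough sketch (the shift value here is arbitrary and chosen only for
illustration), the ``shift`` keyword makes the same call solve
``(A - shift*I)x = b`` instead:
>>> x, exitCode = minres(A, b, shift=-1.0)
>>> exitCode
0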
References
----------
Solution of sparse indefinite systems of linear equations,
C. C. Paige and M. A. Saunders (1975),
SIAM J. Numer. Anal. 12(4), pp. 617-629.
https://web.stanford.edu/group/SOL/software/minres/
This file is a translation of the following MATLAB implementation:
https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
matvec = A.matvec
psolve = M.matvec
first = 'Enter minres. '
last = 'Exit minres. '
n = A.shape[0]
if maxiter is None:
maxiter = 5 * n
msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
' beta1 = 0. The exact solution is x0 ', # 0
' A solution to Ax = b was found, given rtol ', # 1
' A least-squares solution was found, given rtol ', # 2
' Reasonable accuracy achieved, given eps ', # 3
' x has converged to an eigenvector ', # 4
' acond has exceeded 0.1/eps ', # 5
' The iteration limit was reached ', # 6
' A does not define a symmetric matrix ', # 7
' M does not define a symmetric matrix ', # 8
' M does not define a pos-def preconditioner '] # 9
if show:
print(first + 'Solution of symmetric Ax = b')
print(first + 'n = %3g shift = %23.14e' % (n,shift))
print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
print()
istop = 0
itn = 0
Anorm = 0
Acond = 0
rnorm = 0
ynorm = 0
xtype = x.dtype
eps = finfo(xtype).eps
# Set up y and v for the first Lanczos vector v1.
# y = beta1 P' v1, where P = C**(-1).
# v is really P' v1.
if x0 is None:
r1 = b.copy()
else:
r1 = b - A@x
y = psolve(r1)
beta1 = inner(r1, y)
if beta1 < 0:
raise ValueError('indefinite preconditioner')
elif beta1 == 0:
return (postprocess(x), 0)
bnorm = norm(b)
if bnorm == 0:
x = b
return (postprocess(x), 0)
beta1 = sqrt(beta1)
if check:
# are these too strict?
# see if A is symmetric
w = matvec(y)
r2 = matvec(w)
s = inner(w,w)
t = inner(y,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric matrix')
# see if M is symmetric
r2 = psolve(y)
s = inner(y,y)
t = inner(r1,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric preconditioner')
# Initialize other quantities
oldb = 0
beta = beta1
dbar = 0
epsln = 0
qrnorm = beta1
phibar = beta1
rhs1 = beta1
rhs2 = 0
tnorm2 = 0
gmax = 0
gmin = finfo(xtype).max
cs = -1
sn = 0
w = zeros(n, dtype=xtype)
w2 = zeros(n, dtype=xtype)
r2 = r1
if show:
print()
print()
print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
while itn < maxiter:
itn += 1
s = 1.0/beta
v = s*y
y = matvec(v)
y = y - shift * v
if itn >= 2:
y = y - (beta/oldb)*r1
alfa = inner(v,y)
y = y - (alfa/beta)*r2
r1 = r2
r2 = y
y = psolve(r2)
oldb = beta
beta = inner(r2,y)
if beta < 0:
raise ValueError('non-symmetric matrix')
beta = sqrt(beta)
tnorm2 += alfa**2 + oldb**2 + beta**2
if itn == 1:
if beta/beta1 <= 10*eps:
istop = -1 # Terminate later
# Apply previous rotation Qk-1 to get
# [deltak epslnk+1] = [cs sn][dbark 0 ]
# [gbar k dbar k+1] [sn -cs][alfak betak+1].
oldeps = epsln
delta = cs * dbar + sn * alfa # delta1 = 0 deltak
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
epsln = sn * beta # epsln2 = 0 epslnk+1
dbar = - cs * beta # dbar 2 = beta2 dbar k+1
root = norm([gbar, dbar])
Arnorm = phibar * root
# Compute the next plane rotation Qk
gamma = norm([gbar, beta]) # gammak
gamma = max(gamma, eps)
cs = gbar / gamma # ck
sn = beta / gamma # sk
phi = cs * phibar # phik
phibar = sn * phibar # phibark+1
# Update x.
denom = 1.0/gamma
w1 = w2
w2 = w
w = (v - oldeps*w1 - delta*w2) * denom
x = x + phi*w
# Go round again.
gmax = max(gmax, gamma)
gmin = min(gmin, gamma)
z = rhs1 / gamma
rhs1 = rhs2 - delta*z
rhs2 = - epsln*z
# Estimate various norms and test for convergence.
Anorm = sqrt(tnorm2)
ynorm = norm(x)
epsa = Anorm * eps
epsx = Anorm * ynorm * eps
epsr = Anorm * ynorm * tol
diag = gbar
if diag == 0:
diag = epsa
qrnorm = phibar
rnorm = qrnorm
if ynorm == 0 or Anorm == 0:
test1 = inf
else:
test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||)
if Anorm == 0:
test2 = inf
else:
test2 = root / Anorm # ||Ar|| / (||A|| ||r||)
# Estimate cond(A).
# In this version we look at the diagonals of R in the
# factorization of the lower Hessenberg matrix, Q @ H = R,
# where H is the tridiagonal matrix from Lanczos with one
# extra row, beta(k+1) e_k^T.
Acond = gmax/gmin
# See if any of the stopping criteria are satisfied.
# In rare cases, istop is already -1 from above (Abar = const*I).
if istop == 0:
t1 = 1 + test1 # These tests work if tol < eps
t2 = 1 + test2
if t2 <= 1:
istop = 2
if t1 <= 1:
istop = 1
if itn >= maxiter:
istop = 6
if Acond >= 0.1/eps:
istop = 4
if epsx >= beta1:
istop = 3
# if rnorm <= epsx : istop = 2
# if rnorm <= epsr : istop = 1
if test2 <= tol:
istop = 2
if test1 <= tol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= maxiter-10:
prnt = True
if itn % 10 == 0:
prnt = True
if qrnorm <= 10*epsx:
prnt = True
if qrnorm <= 10*epsr:
prnt = True
if Acond <= 1e-2/eps:
prnt = True
if istop != 0:
prnt = True
if show and prnt:
str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
str2 = ' %10.3e' % (test2,)
str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
print(str1 + str2 + str3)
if itn % 10 == 0:
print()
if callback is not None:
callback(x)
if istop != 0:
break # TODO check this
if show:
print()
print(last + ' istop = %3g itn =%5g' % (istop,itn))
print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
print(last + ' Arnorm = %12.4e' % (Arnorm,))
print(last + msg[istop+1])
if istop == 6:
info = maxiter
else:
info = 0
return (postprocess(x),info)
if __name__ == '__main__':
from numpy import arange
from scipy.sparse import spdiags
n = 10
residuals = []
def cb(x):
residuals.append(norm(b - A@x))
# A = poisson((10,),format='csr')
A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
A.psolve = M.matvec
b = zeros(A.shape[0])
x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)[0]
# x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
| bsd-3-clause | db05e97b1fe86b1d3de08c86435fac15 | 28.145408 | 85 | 0.498206 | 3.40435 | false | false | false | false |
scipy/scipy | scipy/optimize/_trustregion_constr/tests/test_report.py | 17 | 1088 | import numpy as np
from scipy.optimize import minimize, Bounds
def test_gh10880():
# checks that verbose reporting works with trust-constr for
# bound-constrained problems
bnds = Bounds(1, 2)
opts = {'maxiter': 1000, 'verbose': 2}
minimize(lambda x: x**2, x0=2., method='trust-constr',
bounds=bnds, options=opts)
opts = {'maxiter': 1000, 'verbose': 3}
minimize(lambda x: x**2, x0=2., method='trust-constr',
bounds=bnds, options=opts)
def test_gh12922():
# checks that verbose reporting works with trust-constr for
# general constraints
def objective(x):
return np.array([(np.sum((x+1)**4))])
cons = {'type': 'ineq', 'fun': lambda x: -x[0]**2}
n = 25
x0 = np.linspace(-5, 5, n)
opts = {'maxiter': 1000, 'verbose': 2}
result = minimize(objective, x0=x0, method='trust-constr',
constraints=cons, options=opts)
opts = {'maxiter': 1000, 'verbose': 3}
result = minimize(objective, x0=x0, method='trust-constr',
constraints=cons, options=opts)
| bsd-3-clause | 57ce8f267153742d9a20ce5584c94e53 | 33 | 63 | 0.602941 | 3.378882 | false | true | false | false |
scipy/scipy | tools/lint_diff.py | 12 | 2676 | #!/usr/bin/env python
import os
import sys
import subprocess
from argparse import ArgumentParser
CONFIG = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'lint_diff.ini',
)
def rev_list(branch, num_commits):
"""List commits in reverse chronological order.
Only the first `num_commits` are shown.
"""
res = subprocess.run(
[
'git',
'rev-list',
'--max-count',
f'{num_commits}',
'--first-parent',
branch
],
stdout=subprocess.PIPE,
encoding='utf-8',
)
res.check_returncode()
return res.stdout.rstrip('\n').split('\n')
def find_branch_point(branch):
"""Find when the current branch split off from the given branch.
It is based on this Stack Overflow post:
https://stackoverflow.com/questions/1527234/finding-a-branch-point-with-git#4991675
"""
branch_commits = rev_list('HEAD', 1000)
main_commits = set(rev_list(branch, 1000))
for branch_commit in branch_commits:
if branch_commit in main_commits:
return branch_commit
# If a branch split off over 1000 commits ago we will fail to find
# the ancestor.
raise RuntimeError(
'Failed to find a common ancestor in the last 1000 commits')
def find_diff(sha, files=None):
"""Find the diff since the given sha."""
if files:
for file_or_dir in files:
msg = f"{file_or_dir} doesn't exist. Please provide a valid path."
assert os.path.exists(file_or_dir), msg
else:
files = ['*.py']
res = subprocess.run(
['git', 'diff', '--unified=0', sha, '--'] + files,
stdout=subprocess.PIPE,
encoding='utf-8'
)
res.check_returncode()
return res.stdout
def run_flake8(diff):
"""Run flake8 on the given diff."""
res = subprocess.run(
['flake8', '--diff', '--config', CONFIG],
input=diff,
stdout=subprocess.PIPE,
encoding='utf-8',
)
return res.returncode, res.stdout
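# Typical invocation from the repository root (branch name and paths are
# illustrative):
#
#     python tools/lint_diff.py --branch main --files scipy/optimize
#
# Only the lines changed since the branch point are handed to flake8.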
def main():
parser = ArgumentParser()
parser.add_argument("--branch", type=str, default='main',
help="The branch to diff against")
parser.add_argument("--files", type=str, nargs='+', default=None,
help="The files or directories to diff against")
args = parser.parse_args()
branch_point = find_branch_point(args.branch)
diff = find_diff(branch_point, args.files)
rc, errors = run_flake8(diff)
if errors:
print(errors)
else:
print("No lint errors found.")
sys.exit(rc)
if __name__ == '__main__':
main()
| bsd-3-clause | add1e5b02775861c6106f586cbbda514 | 25.235294 | 87 | 0.589312 | 3.817404 | false | false | false | false |
scipy/scipy | scipy/fft/_pocketfft/tests/test_basic.py | 16 | 35706 | # Created by Pearu Peterson, September 2002
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less,
assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy.fft._pocketfft import (ifft, fft, fftn, ifftn,
rfft, irfft, rfftn, irfftn, fft2,
hfft, ihfft, hfftn, ihfftn)
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFT._PYPOCKETFFT
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
return rand(*size)
def swap_byteorder(arr):
"""Returns the same array with swapped byteorder"""
dtype = arr.dtype.newbyteorder('S')
return arr.astype(dtype)
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
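# The direct_* helpers below evaluate the transform definitions naively in
# O(n**2) time, e.g. the forward DFT as y[k] = sum_m x[m]*exp(-2j*pi*k*m/n).
# They serve purely as reference implementations for the FFT routines.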
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
def direct_dftn(x):
x = asarray(x)
for axis in range(x.ndim):
x = fft(x, axis=axis)
return x
def direct_idftn(x):
x = asarray(x)
for axis in range(x.ndim):
x = ifft(x, axis=axis)
return x
def direct_rdft(x):
x = asarray(x)
n = len(x)
w = -arange(n)*(2j*pi/n)
y = zeros(n//2+1, dtype=cdouble)
for i in range(n//2+1):
y[i] = dot(exp(i*w), x)
return y
def direct_irdft(x, n):
x = asarray(x)
x1 = zeros(n, dtype=cdouble)
for i in range(n//2+1):
x1[i] = x[i]
if i > 0 and 2*i < n:
x1[n-i] = np.conj(x[i])
return direct_idft(x1).real
def direct_rdftn(x):
return fftn(rfft(x), axes=range(x.ndim - 1))
class _TestFFTBase:
def setup_method(self):
self.cdt = None
self.rdt = None
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
y = fft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_dft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
assert_array_almost_equal(fft(x),direct_dft(x))
def test_n_argument_real(self):
x1 = np.array([1,2,3,4], dtype=self.rdt)
x2 = np.array([1,2,3,4], dtype=self.rdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def _test_n_argument_complex(self):
x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y = fft(x.astype(complex))
y2 = numpy.fft.fft(x)
assert_array_almost_equal(y,y2)
y = fft(x)
assert_array_almost_equal(y,y2)
def test_invalid_sizes(self):
assert_raises(ValueError, fft, [])
assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
class TestLongDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.longcomplex
self.rdt = np.longdouble
class TestDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class TestFloat16FFT:
def test_1_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft(x1, n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (4, ))
assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def test_n_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
x2 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft([x1, x2], n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (2, 4))
assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
y = ifft(x)
y1 = direct_idft(x)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_definition_real(self):
x = np.array([1,2,3,4,1,2,3,4], self.rdt)
y = ifft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_idft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4,5], dtype=self.rdt)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y = ifft(x.astype(self.cdt))
y2 = numpy.fft.ifft(x)
assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
y = ifft(x)
assert_allclose(y,y2, rtol=self.rtol, atol=self.atol)
def test_random_complex(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.cdt)
x = random([size]).astype(self.cdt) + 1j*x
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
x = (x + 1j*np.random.rand(size)).astype(self.cdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, self.rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, ifft, [])
assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
@pytest.mark.skipif(np.longdouble is np.float64,
reason="Long double is aliased to double")
class TestLongDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.longcomplex
self.rdt = np.longdouble
self.rtol = 1e-10
self.atol = 1e-10
class TestDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.rtol = 1e-10
self.atol = 1e-10
class TestSingleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.rtol = 1e-5
self.atol = 1e-4
class _TestRFFTBase:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
x = np.array(t, dtype=self.rdt)
y = rfft(x)
y1 = direct_rdft(x)
assert_array_almost_equal(y,y1)
assert_equal(y.dtype, self.cdt)
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y1 = np.fft.rfft(x)
y = rfft(x)
assert_array_almost_equal(y,y1)
def test_invalid_sizes(self):
assert_raises(ValueError, rfft, [])
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
def test_complex_input(self):
x = np.zeros(10, dtype=self.cdt)
with assert_raises(TypeError, match="x must be a real sequence"):
rfft(x)
# See gh-5790
class MockSeries:
def __init__(self, data):
self.data = np.asarray(data)
def __getattr__(self, item):
try:
return getattr(self.data, item)
except AttributeError as e:
raise AttributeError(("'MockSeries' object "
"has no attribute '{attr}'".
format(attr=item))) from e
def test_non_ndarray_with_dtype(self):
x = np.array([1., 2., 3., 4., 5.])
xs = _TestRFFTBase.MockSeries(x)
expected = [1, 2, 3, 4, 5]
rfft(xs)
# Data should not have been overwritten
assert_equal(x, expected)
assert_equal(xs.data, expected)
@pytest.mark.skipif(np.longfloat is np.float64,
reason="Long double is aliased to double")
class TestRFFTLongDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.longcomplex
self.rdt = np.longfloat
class TestRFFTDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestIRFFTBase:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x1 = [1,2+3j,4+1j,1+2j,3+4j]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x1 = x1_1[:5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
x2 = x2_1[:5]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.cdt), n=len(xr))
y1 = direct_irdft(x, len(xr))
assert_equal(y.dtype, self.rdt)
assert_array_almost_equal(y,y1, decimal=self.ndec)
assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
_test(x1, x1_1)
_test(x2, x2_1)
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(-1, n, 2) + 1j * np.arange(0, n+1, 2)
x[0] = 0
if n % 2 == 0:
x[-1] = np.real(x[-1])
y1 = np.fft.irfft(x)
y = irfft(x)
assert_array_almost_equal(y,y1)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = irfft(rfft(x), n=size)
y2 = rfft(irfft(x, n=(size*2-1)))
assert_equal(y1.dtype, self.rdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x, decimal=self.ndec,
err_msg="size=%d" % size)
assert_array_almost_equal(y2, x, decimal=self.ndec,
err_msg="size=%d" % size)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = irfft(rfft(x), len(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = rfft(irfft(x, 2 * len(x) - 1))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, irfft, [])
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
# self.ndec is bogus; we should have an assert_array_approx_equal for number of
# significant digits
@pytest.mark.skipif(np.longfloat is np.float64,
reason="Long double is aliased to double")
class TestIRFFTLongDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.ndec = 5
class Testfft2:
def setup_method(self):
np.random.seed(1234)
def test_regression_244(self):
"""FFT returns wrong result with axes parameter."""
# fftn (and hence fft2) used to break when both axes and shape were
# used
x = numpy.ones((4, 4, 2))
y = fft2(x, s=(8, 8), axes=(-3, -2))
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
assert_array_almost_equal(y, y_r)
def test_invalid_sizes(self):
assert_raises(ValueError, fft2, [[]])
assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float32))
assert_(y.dtype == np.complex64,
msg="double precision output with single precision")
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_size_accuracy_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_size_accuracy_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
def test_definition_float16(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float16))
assert_equal(y.dtype, np.complex64)
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_float16_input_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 5e5)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_float16_input_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(x)
assert_array_almost_equal(y, direct_dftn(x))
x = random((20, 26))
assert_array_almost_equal(fftn(x), direct_dftn(x))
x = random((5, 4, 3, 20))
assert_array_almost_equal(fftn(x), direct_dftn(x))
def test_axes_argument(self):
# plane == ji_plane, x== kji_space
plane1 = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
plane2 = [[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]
plane3 = [[19, 20, 21],
[22, 23, 24],
[25, 26, 27]]
ki_plane1 = [[1, 2, 3],
[10, 11, 12],
[19, 20, 21]]
ki_plane2 = [[4, 5, 6],
[13, 14, 15],
[22, 23, 24]]
ki_plane3 = [[7, 8, 9],
[16, 17, 18],
[25, 26, 27]]
jk_plane1 = [[1, 10, 19],
[4, 13, 22],
[7, 16, 25]]
jk_plane2 = [[2, 11, 20],
[5, 14, 23],
[8, 17, 26]]
jk_plane3 = [[3, 12, 21],
[6, 15, 24],
[9, 18, 27]]
kj_plane1 = [[1, 4, 7],
[10, 13, 16], [19, 22, 25]]
kj_plane2 = [[2, 5, 8],
[11, 14, 17], [20, 23, 26]]
kj_plane3 = [[3, 6, 9],
[12, 15, 18], [21, 24, 27]]
ij_plane1 = [[1, 4, 7],
[2, 5, 8],
[3, 6, 9]]
ij_plane2 = [[10, 13, 16],
[11, 14, 17],
[12, 15, 18]]
ij_plane3 = [[19, 22, 25],
[20, 23, 26],
[21, 24, 27]]
ik_plane1 = [[1, 10, 19],
[2, 11, 20],
[3, 12, 21]]
ik_plane2 = [[4, 13, 22],
[5, 14, 23],
[6, 15, 24]]
ik_plane3 = [[7, 16, 25],
[8, 17, 26],
[9, 18, 27]]
ijk_space = [jk_plane1, jk_plane2, jk_plane3]
ikj_space = [kj_plane1, kj_plane2, kj_plane3]
jik_space = [ik_plane1, ik_plane2, ik_plane3]
jki_space = [ki_plane1, ki_plane2, ki_plane3]
kij_space = [ij_plane1, ij_plane2, ij_plane3]
x = array([plane1, plane2, plane3])
assert_array_almost_equal(fftn(x),
fftn(x, axes=(-3, -2, -1))) # kji_space
assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
y = fftn(x, axes=(2, 1, 0)) # ijk_space
assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
y = fftn(x, axes=(2, 0, 1)) # ikj_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
fftn(ikj_space))
y = fftn(x, axes=(1, 2, 0)) # jik_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
fftn(jik_space))
y = fftn(x, axes=(1, 0, 2)) # jki_space
assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
y = fftn(x, axes=(0, 2, 1)) # kij_space
assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
y = fftn(x, axes=(-2, -1)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(1, 2)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(-3, -2)) # kj_plane
assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
y = fftn(x, axes=(-3, -1)) # ki_plane
assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
y = fftn(x, axes=(-1, -2)) # ij_plane
assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
y = fftn(x, axes=(-1, -3)) # ik_plane
assert_array_almost_equal(fftn(ik_plane1),
swapaxes(y[:, 0, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane2),
swapaxes(y[:, 1, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane3),
swapaxes(y[:, 2, :], -1, -2))
y = fftn(x, axes=(-2, -3)) # jk_plane
assert_array_almost_equal(fftn(jk_plane1),
swapaxes(y[:, :, 0], -1, -2))
assert_array_almost_equal(fftn(jk_plane2),
swapaxes(y[:, :, 1], -1, -2))
assert_array_almost_equal(fftn(jk_plane3),
swapaxes(y[:, :, 2], -1, -2))
y = fftn(x, axes=(-1,)) # i_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
y = fftn(x, axes=(-2,)) # j_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
y = fftn(x, axes=(0,)) # k_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
y = fftn(x, axes=()) # point
assert_array_almost_equal(y, x)
def test_shape_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6]]
large_x1 = [[1, 2, 3, 0],
[4, 5, 6, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
y = fftn(small_x, s=(4, 4))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, s=(3, 4))
assert_array_almost_equal(y, fftn(large_x1[:-1]))
def test_shape_axes_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
large_x1 = array([[1, 2, 3, 0],
[4, 5, 6, 0],
[7, 8, 9, 0],
[0, 0, 0, 0]])
y = fftn(small_x, s=(4, 4), axes=(-2, -1))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, s=(4, 4), axes=(-1, -2))
assert_array_almost_equal(y, swapaxes(
fftn(swapaxes(large_x1, -1, -2)), -1, -2))
def test_shape_axes_argument2(self):
# Change shape of the last axis
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-1,), s=(8,))
assert_array_almost_equal(y, fft(x, axis=-1, n=8))
# Change shape of an arbitrary axis which is not the last one
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-2,), s=(8,))
assert_array_almost_equal(y, fft(x, axis=-2, n=8))
# Change shape of axes: cf #244, where shape and axes were mixed up
x = numpy.random.random((4, 4, 2))
y = fftn(x, axes=(-3, -2), s=(8, 8))
assert_array_almost_equal(y,
numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
def test_shape_argument_more(self):
x = zeros((4, 4, 2))
with assert_raises(ValueError,
match="shape requires more axes than are present"):
fftn(x, s=(8, 8, 2, 1))
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
fftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
fftn([[1, 1], [2, 2]], (4, -3))
def test_no_axes(self):
x = numpy.random.random((2,2,2))
assert_allclose(fftn(x, axes=[]), x, atol=1e-7)
class TestIfftn:
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = ifftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
@pytest.mark.parametrize('maxnlp', [2000, 3500])
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random_complex(self, maxnlp, size):
x = random([size, size]) + 1j*random([size, size])
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
ifftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
ifftn([[1, 1], [2, 2]], (4, -3))
def test_no_axes(self):
x = numpy.random.random((2,2,2))
assert_allclose(ifftn(x, axes=[]), x, atol=1e-7)
class TestRfftn:
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = rfftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_rdftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(rfftn(x), direct_rdftn(x), maxnlp)
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random(self, size):
x = random([size, size])
assert_allclose(irfftn(rfftn(x), x.shape), x, atol=1e-10)
@pytest.mark.parametrize('func', [rfftn, irfftn])
def test_invalid_sizes(self, func):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
func([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
func([[1, 1], [2, 2]], (4, -3))
@pytest.mark.parametrize('func', [rfftn, irfftn])
def test_no_axes(self, func):
with assert_raises(ValueError,
match="at least 1 axis must be transformed"):
func([], axes=[])
def test_complex_input(self):
with assert_raises(TypeError, match="x must be a real sequence"):
rfftn(np.zeros(10, dtype=np.complex64))
class FakeArray:
def __init__(self, data):
self._data = data
self.__array_interface__ = data.__array_interface__
class FakeArray2:
def __init__(self, data):
self._data = data
def __array__(self):
return self._data
# TODO: Is this test actually valuable? The behavior it's testing shouldn't be
# relied upon by users except for overwrite_x = False
class TestOverwrite:
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = [np.float32, np.float64, np.longfloat]
dtypes = real_dtypes + [np.complex64, np.complex128, np.longcomplex]
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x, should_overwrite):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and fftsize <= shape[axis])
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.longcomplex, np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
def fftshape_iter(shp):
if len(shp) <= 0:
yield ()
else:
for j in (shp[0]//2, shp[0], shp[0]*2):
for rest in fftshape_iter(shp[1:]):
yield (j,) + rest
def part_shape(shape, axes):
if axes is None:
return shape
else:
return tuple(np.take(shape, axes))
def should_overwrite(data, shape, axes):
s = part_shape(data.shape, axes)
return (overwrite_x and
np.prod(shape) <= np.prod(s)
and dtype in overwritable_dtypes)
for fftshape in fftshape_iter(part_shape(shape, axes)):
self._check(data, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite(data, fftshape, axes))
if data.ndim > 1:
# check fortran order
self._check(data.T, routine, fftshape, axes,
overwrite_x=overwrite_x,
should_overwrite=should_overwrite(
data.T, fftshape, axes))
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
((16,), (0,)),
((16, 2), (0,)),
((2, 16), (1,)),
((8, 16), None),
((8, 16), (0, 1)),
((8, 16, 2), (0, 1)),
((8, 16, 2), (1, 2)),
((8, 16, 2), (0,)),
((8, 16, 2), (1,)),
((8, 16, 2), (2,)),
((8, 16, 2), None),
((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
overwritable = (np.longcomplex, np.complex128, np.complex64)
self._check_nd_one(fftn, dtype, shape, axes, overwritable,
overwrite_x)
self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
overwrite_x)
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
rfft, irfft, rfftn, irfftn])
def test_invalid_norm(func):
x = np.arange(10, dtype=float)
with assert_raises(ValueError,
match='Invalid norm value \'o\', should be'
' "backward", "ortho" or "forward"'):
func(x, norm='o')
@pytest.mark.parametrize('func', [fft, ifft, fftn, ifftn,
irfft, irfftn, hfft, hfftn])
def test_swapped_byte_order_complex(func):
rng = np.random.RandomState(1234)
x = rng.rand(10) + 1j * rng.rand(10)
assert_allclose(func(swap_byteorder(x)), func(x))
@pytest.mark.parametrize('func', [ihfft, ihfftn, rfft, rfftn])
def test_swapped_byte_order_real(func):
rng = np.random.RandomState(1234)
x = rng.rand(10)
assert_allclose(func(swap_byteorder(x)), func(x))
| bsd-3-clause | 9bb716cf3064c9011a4566267076ea6c | 33.937378 | 80 | 0.507814 | 3.094376 | false | true | false | false |
scipy/scipy | scipy/interpolate/_rgi.py | 1 | 28186 | __all__ = ['RegularGridInterpolator', 'interpn']
import itertools
import numpy as np
from .interpnd import _ndim_coords_from_arrays
from ._cubic import PchipInterpolator
from ._rgi_cython import evaluate_linear_2d, find_indices
from ._bsplines import make_interp_spline
from ._fitpack2 import RectBivariateSpline
def _make_points_and_values_ascending(points, values):
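# Helper: return ``points`` sorted into ascending order along each dimension
# together with ``values`` permuted to match the new ordering.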
# create ascending points
sorted_indexes = tuple(np.argsort(point) for point in points)
points_asc = tuple(
np.asarray(point)[sort_index] for (point, sort_index) in
zip(points, sorted_indexes))
# create ascending values
ordered_indexes = tuple([*range(len(x))] for x in sorted_indexes)
ordered_indexes_array = np.array(
[i.flatten() for i in np.meshgrid(*ordered_indexes)]).transpose()
sorted_indexes_array = np.array(
[i.flatten() for i in np.meshgrid(*sorted_indexes)]).transpose()
values_asc = np.zeros_like(np.asarray(values))
for o, s in zip(ordered_indexes_array, sorted_indexes_array):
values_asc[tuple(o)] = values[tuple(s)]
return points_asc, values_asc
class RegularGridInterpolator:
"""
Interpolation on a regular or rectilinear grid in arbitrary dimensions.
The data must be defined on a rectilinear grid; that is, a rectangular
grid with even or uneven spacing. Linear, nearest-neighbor, and spline
interpolations are supported. After setting up the interpolator object,
the interpolation method may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions. The points in
each dimension (i.e. every element of the points tuple) must be
strictly ascending or descending.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions. Complex data is
accepted.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic" and "pchip". This
parameter will become the default for the object's ``__call__``
method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
Default is True.
fill_value : float or None, optional
The value to use for points outside of the interpolation domain.
If None, values outside the domain are extrapolated.
Default is ``np.nan``.
Methods
-------
__call__
Attributes
----------
grid : tuple of ndarrays
The points defining the regular grid in n dimensions.
This tuple defines the full grid via
``np.meshgrid(*grid, indexing='ij')``
values : ndarray
Data values at the grid.
method : str
Interpolation method.
fill_value : float or ``None``
Use this value for out-of-bounds arguments to `__call__`.
bounds_error : bool
If ``True``, out-of-bounds arguments raise a ``ValueError``.
Notes
-----
Contrary to `LinearNDInterpolator` and `NearestNDInterpolator`, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
In other words, this class assumes that the data is defined on a
*rectilinear* grid.
.. versionadded:: 0.14
The 'slinear'(k=1), 'cubic'(k=3), and 'quintic'(k=5) methods are
tensor-product spline interpolators, where `k` is the spline degree.
If any dimension has fewer points than `k` + 1, an error will be raised.
.. versionadded:: 1.9
If the input data is such that dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolating.
Examples
--------
**Evaluate a function on the points of a 3-D grid**
As a first example, we evaluate a simple example function on the points of
a 3-D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> import numpy as np
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> xg, yg ,zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
>>> data = f(xg, yg, zg)
``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> interp = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3],
... [3.3, 5.2, 7.1]])
>>> interp(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
>>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)
(125.54200000000002, 145.894)
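The same grid can also be interpolated with a different scheme by passing
the ``method`` keyword; as a sketch (using 'cubic', which needs at least
``k + 1 = 4`` points per dimension), the spline result is close to the
exact values as well:
>>> interp_cubic = RegularGridInterpolator((x, y, z), data, method='cubic')
>>> np.allclose(interp_cubic(pts), [f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)],
...             rtol=1e-3)
True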
**Interpolate and extrapolate a 2D dataset**
As a second example, we interpolate and extrapolate a 2D data set:
>>> x, y = np.array([-2, 0, 4]), np.array([-2, 0, 2, 5])
>>> def ff(x, y):
... return x**2 + y**2
>>> xg, yg = np.meshgrid(x, y, indexing='ij')
>>> data = ff(xg, yg)
>>> interp = RegularGridInterpolator((x, y), data,
... bounds_error=False, fill_value=None)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection='3d')
>>> ax.scatter(xg.ravel(), yg.ravel(), data.ravel(),
... s=60, c='k', label='data')
Evaluate and plot the interpolator on a finer grid
>>> xx = np.linspace(-4, 9, 31)
>>> yy = np.linspace(-4, 9, 31)
>>> X, Y = np.meshgrid(xx, yy, indexing='ij')
>>> # interpolator
>>> ax.plot_wireframe(X, Y, interp((X, Y)), rstride=3, cstride=3,
... alpha=0.4, color='m', label='linear interp')
>>> # ground truth
>>> ax.plot_wireframe(X, Y, ff(X, Y), rstride=3, cstride=3,
... alpha=0.4, label='ground truth')
>>> plt.legend()
>>> plt.show()
Other examples are given
:ref:`in the tutorial <tutorial-interpolate_regular_grid_interpolator>`.
See Also
--------
NearestNDInterpolator : Nearest neighbor interpolation on *unstructured*
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on *unstructured* data
in N dimensions
interpn : a convenience function which wraps `RegularGridInterpolator`
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
(suitable for e.g., N-D image resampling)
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
:doi:`10.1090/S0025-5718-1988-0917826-0`
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
_SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3}
_SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys())
_ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in self._ALL_METHODS:
raise ValueError("Method '%s' is not defined" % method)
elif method in self._SPLINE_METHODS:
self._validate_grid_dimensions(points, method)
self.method = method
self.bounds_error = bounds_error
self.grid, self._descending_dimensions = self._check_points(points)
self.values = self._check_values(values)
self._check_dimensionality(self.grid, self.values)
self.fill_value = self._check_fill_value(self.values, fill_value)
if self._descending_dimensions:
self.values = np.flip(self.values, axis=self._descending_dimensions)
def _check_dimensionality(self, points, values):
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
for i, p in enumerate(points):
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
def _check_values(self, values):
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
return values
def _check_fill_value(self, values, fill_value):
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
return fill_value
def _check_points(self, points):
descending_dimensions = []
grid = []
for i, p in enumerate(points):
# convert the points to float early,
# see https://github.com/scipy/scipy/pull/17230
p = np.asarray(p, dtype=float)
if not np.all(p[1:] > p[:-1]):
if np.all(p[1:] < p[:-1]):
# input is descending, so make it ascending
descending_dimensions.append(i)
p = np.flip(p)
p = np.ascontiguousarray(p)
else:
raise ValueError(
"The points in dimension %d must be strictly "
"ascending or descending" % i)
grid.append(p)
return tuple(grid), tuple(descending_dimensions)
def __call__(self, xi, method=None):
"""
Interpolation at coordinates.
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to evaluate the interpolator at.
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic" and "pchip". Default is
the method chosen when the interpolator was created.
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at `xi`. See notes for behaviour when
``xi.ndim == 1``.
Notes
-----
In the case that ``xi.ndim == 1`` a new axis is inserted into
the 0 position of the returned array, values_x, so its shape is
instead ``(1,) + values.shape[ndim:]``.
Examples
--------
Here we define a nearest-neighbor interpolator of a simple function
>>> import numpy as np
>>> x, y = np.array([0, 1, 2]), np.array([1, 3, 7])
>>> def f(x, y):
... return x**2 + y**2
>>> data = f(*np.meshgrid(x, y, indexing='ij', sparse=True))
>>> from scipy.interpolate import RegularGridInterpolator
>>> interp = RegularGridInterpolator((x, y), data, method='nearest')
By construction, the interpolator uses the nearest-neighbor
interpolation
>>> interp([[1.5, 1.3], [0.3, 4.5]])
array([2., 9.])
We can however evaluate the linear interpolant by overriding the
`method` parameter
>>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear')
array([ 4.7, 24.3])
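The spline methods can likewise be requested per call; as a small sketch
(using 'slinear', which needs only two grid points per dimension), the
degree-1 spline agrees with the linear result above:
>>> np.allclose(interp([[1.5, 1.3]], method='slinear'),
...             interp([[1.5, 1.3]], method='linear'))
True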
"""
is_method_changed = self.method != method
method = self.method if method is None else method
if method not in self._ALL_METHODS:
raise ValueError("Method '%s' is not defined" % method)
xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi)
if method == "linear":
indices, norm_distances = self._find_indices(xi.T)
if (ndim == 2 and hasattr(self.values, 'dtype') and
self.values.ndim == 2):
# a fast path
out = np.empty(indices.shape[1], dtype=self.values.dtype)
result = evaluate_linear_2d(self.values,
indices,
norm_distances,
self.grid,
out)
else:
result = self._evaluate_linear(indices, norm_distances)
elif method == "nearest":
indices, norm_distances = self._find_indices(xi.T)
result = self._evaluate_nearest(indices, norm_distances)
elif method in self._SPLINE_METHODS:
if is_method_changed:
self._validate_grid_dimensions(self.grid, method)
result = self._evaluate_spline(xi, method)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
# f(nan) = nan, if any
if np.any(nans):
result[nans] = np.nan
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _prepare_xi(self, xi):
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
f"{xi.shape[-1]} but this "
f"RegularGridInterpolator has dimension {ndim}")
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
xi = np.asarray(xi, dtype=float)
# find nans in input
nans = np.any(np.isnan(xi), axis=-1)
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
out_of_bounds = None
else:
out_of_bounds = self._find_out_of_bounds(xi.T)
return xi, xi_shape, ndim, nans, out_of_bounds
def _evaluate_linear(self, indices, norm_distances):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# Compute shifting up front before zipping everything together
shift_norm_distances = [1 - yi for yi in norm_distances]
shift_indices = [i + 1 for i in indices]
# The formula for linear interpolation in 2d takes the form:
# values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \
# self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \
# self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \
# self.values[(i0 + 1, i1 + 1)] * y0 * y1
# We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2)
zipped1 = zip(indices, shift_norm_distances)
zipped2 = zip(shift_indices, norm_distances)
# Take all products of zipped1 and zipped2 and iterate over them
# to get the terms in the above formula. This corresponds to iterating
# over the vertices of a hypercube.
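        # For example, in 2-D with i0 = i1 = 0 and y0 = y1 = 0.5, each of the
        # four weights (1-y0)*(1-y1), (1-y0)*y1, y0*(1-y1) and y0*y1 equals
        # 0.25, so the result is the plain average of the four surrounding
        # grid values.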
hypercube = itertools.product(*zip(zipped1, zipped2))
value = np.array([0.])
for h in hypercube:
edge_indices, weights = zip(*h)
weight = np.array([1.])
for w in weights:
weight = weight * w
term = np.asarray(self.values[edge_indices]) * weight[vslice]
value = value + term # cannot use += because broadcasting
return value
def _evaluate_nearest(self, indices, norm_distances):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _validate_grid_dimensions(self, points, method):
k = self._SPLINE_DEGREE_MAP[method]
for i, point in enumerate(points):
ndim = len(np.atleast_1d(point))
if ndim <= k:
raise ValueError(f"There are {ndim} points in dimension {i},"
f" but method {method} requires at least "
f" {k+1} points per dimension.")
def _evaluate_spline(self, xi, method):
# ensure xi is 2D list of points to evaluate (`m` is the number of
# points and `n` is the number of interpolation dimensions,
# ``n == len(self.grid)``.)
if xi.ndim == 1:
xi = xi.reshape((1, xi.size))
m, n = xi.shape
# Reorder the axes: n-dimensional process iterates over the
# interpolation axes from the last axis downwards: E.g. for a 4D grid
# the order of axes is 3, 2, 1, 0. Each 1D interpolation works along
# the 0th axis of its argument array (for 1D routine it's its ``y``
# array). Thus permute the interpolation axes of `values` *and keep
# trailing dimensions trailing*.
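        # E.g. for a 4-D grid (n == 4) with one trailing value dimension,
        # axes (0, 1, 2, 3, 4) are permuted to (3, 2, 1, 0, 4).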
axes = tuple(range(self.values.ndim))
axx = axes[:n][::-1] + axes[n:]
values = self.values.transpose(axx)
if method == 'pchip':
_eval_func = self._do_pchip
else:
_eval_func = self._do_spline_fit
k = self._SPLINE_DEGREE_MAP[method]
# Non-stationary procedure: difficult to vectorize this part entirely
# into numpy-level operations. Unfortunately this requires explicit
# looping over each point in xi.
# can at least vectorize the first pass across all points in the
# last variable of xi.
last_dim = n - 1
first_values = _eval_func(self.grid[last_dim],
values,
xi[:, last_dim],
k)
# the rest of the dimensions have to be on a per point-in-xi basis
shape = (m, *self.values.shape[n:])
result = np.empty(shape, dtype=self.values.dtype)
for j in range(m):
# Main process: Apply 1D interpolate in each dimension
# sequentially, starting with the last dimension.
# These are then "folded" into the next dimension in-place.
folded_values = first_values[j, ...]
for i in range(last_dim-1, -1, -1):
# Interpolate for each 1D from the last dimensions.
# This collapses each 1D sequence into a scalar.
folded_values = _eval_func(self.grid[i],
folded_values,
xi[j, i],
k)
result[j, ...] = folded_values
return result
@staticmethod
def _do_spline_fit(x, y, pt, k):
local_interp = make_interp_spline(x, y, k=k, axis=0)
values = local_interp(pt)
return values
@staticmethod
def _do_pchip(x, y, pt, k):
local_interp = PchipInterpolator(x, y, axis=0)
values = local_interp(pt)
return values
def _find_indices(self, xi):
return find_indices(self.grid, xi)
def _find_out_of_bounds(self, xi):
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular or rectilinear grids.
Strictly speaking, not all regular grids are supported - this function
works on *rectilinear* grids, that is, a rectangular grid with even or
uneven spacing.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions. The points in
        each dimension (i.e. every element of the points tuple) must be
strictly ascending or descending.
values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions. Complex data is
        accepted.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear",
"nearest", "slinear", "cubic", "quintic", "pchip", and "splinef2d".
"splinef2d" is only supported for 2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at `xi`. See notes for behaviour when
``xi.ndim == 1``.
Notes
-----
.. versionadded:: 0.14
In the case that ``xi.ndim == 1`` a new axis is inserted into
the 0 position of the returned array, values_x, so its shape is
instead ``(1,) + values.shape[ndim:]``.
If the input data is such that input dimensions have incommensurate
units and differ by many orders of magnitude, the interpolant may have
numerical artifacts. Consider rescaling the data before interpolation.
Examples
--------
Evaluate a simple example function on the points of a regular 3-D grid:
>>> import numpy as np
>>> from scipy.interpolate import interpn
>>> def value_func_3d(x, y, z):
... return 2 * x + 3 * y - z
>>> x = np.linspace(0, 4, 5)
>>> y = np.linspace(0, 5, 6)
>>> z = np.linspace(0, 6, 7)
>>> points = (x, y, z)
>>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
Evaluate the interpolating function at a point
>>> point = np.array([2.21, 3.12, 1.15])
>>> print(interpn(points, values, point))
[12.63]
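    As a further sketch, points outside the grid can be extrapolated by
    passing ``bounds_error=False`` together with ``fill_value=None``; because
    ``value_func_3d`` is linear, the extrapolated value is exact here
    >>> point_outside = np.array([5.0, 4.0, 3.0])
    >>> print(interpn(points, values, point_outside, method='linear',
    ...               bounds_error=False, fill_value=None))
    [19.]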
See Also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : interpolation on a regular or rectilinear grid
in arbitrary dimensions (`interpn` wraps this
class).
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
scipy.ndimage.map_coordinates : interpolation on grids with equal spacing
(suitable for e.g., N-D image resampling)
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "cubic", "quintic", "pchip",
"splinef2d", "slinear"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', 'slinear', 'cubic', 'quintic', 'pchip', "
f"and 'splinef2d'. You provided {method}.")
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method splinef2d can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method splinef2d can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
diff_p = np.diff(p)
if not np.all(diff_p > 0.):
if np.all(diff_p < 0.):
# input is descending, so make it ascending
points, values = _make_points_and_values_ascending(points,
values)
else:
raise ValueError("The points in dimension %d must be strictly "
"ascending or descending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[-1], len(grid)))
if bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method in ["linear", "nearest", "slinear", "cubic", "quintic", "pchip"]:
interp = RegularGridInterpolator(points, values, method=method,
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
| bsd-3-clause | ef2c5d2751858a77a25f1c500a579dbc | 39.323319 | 112 | 0.569396 | 3.954265 | false | false | false | false |
scipy/scipy | scipy/optimize/_linprog_doc.py | 1 | 61967 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 19:49:17 2020
@author: matth
"""
def _linprog_highs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='highs', callback=None,
maxiter=None, disp=False, presolve=True,
time_limit=None,
dual_feasibility_tolerance=None,
primal_feasibility_tolerance=None,
ipm_optimality_tolerance=None,
simplex_dual_edge_weight_strategy=None,
mip_rel_gap=None,
**unknown_options):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints using one of the HiGHS solvers.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str
This is the method-specific documentation for 'highs', which chooses
automatically between
:ref:`'highs-ds' <optimize.linprog-highs-ds>` and
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are also available.
integrality : 1-D array or int, optional
Indicates the type of integrality constraint on each decision variable.
``0`` : Continuous variable; no integrality constraint.
``1`` : Integer variable; decision variable must be an integer
within `bounds`.
``2`` : Semi-continuous variable; decision variable must be within
`bounds` or take value ``0``.
``3`` : Semi-integer variable; decision variable must be an integer
within `bounds` or take value ``0``.
By default, all variables are continuous.
For mixed integrality constraints, supply an array of shape `c.shape`.
To infer a constraint on each decision variable from shorter inputs,
the argument will be broadcasted to `c.shape` using `np.broadcast_to`.
This argument is currently used only by the ``'highs'`` method and
ignored otherwise.
Options
-------
maxiter : int
The maximum number of iterations to perform in either phase.
For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
include the number of crossover iterations. Default is the largest
possible value for an ``int`` on the platform.
disp : bool (default: ``False``)
Set to ``True`` if indicators of optimization status are to be
printed to the console during optimization.
presolve : bool (default: ``True``)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver. It is generally recommended
to keep the default setting ``True``; set to ``False`` if
presolve is to be disabled.
time_limit : float
The maximum time in seconds allotted to solve the problem;
default is the largest possible value for a ``double`` on the
platform.
dual_feasibility_tolerance : double (default: 1e-07)
Dual feasibility tolerance for
:ref:`'highs-ds' <optimize.linprog-highs-ds>`.
The minimum of this and ``primal_feasibility_tolerance``
is used for the feasibility tolerance of
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
primal_feasibility_tolerance : double (default: 1e-07)
Primal feasibility tolerance for
:ref:`'highs-ds' <optimize.linprog-highs-ds>`.
The minimum of this and ``dual_feasibility_tolerance``
is used for the feasibility tolerance of
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
ipm_optimality_tolerance : double (default: ``1e-08``)
Optimality tolerance for
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
Minimum allowable value is 1e-12.
simplex_dual_edge_weight_strategy : str (default: None)
Strategy for simplex dual edge weights. The default, ``None``,
automatically selects one of the following.
``'dantzig'`` uses Dantzig's original strategy of choosing the most
negative reduced cost.
``'devex'`` uses the strategy described in [15]_.
        ``'steepest'`` uses the exact steepest edge strategy as described in
[16]_.
``'steepest-devex'`` begins with the exact steepest edge strategy
until the computation is too costly or inexact and then switches to
the devex method.
        Currently, ``None`` always selects ``'steepest-devex'``, but this
may change as new options become available.
mip_rel_gap : double (default: None)
Termination criterion for MIP solver: solver will terminate when the
gap between the primal objective value and the dual objective bound,
scaled by the primal objective value, is <= mip_rel_gap.
unknown_options : dict
Optional arguments not used by this particular solver. If
``unknown_options`` is non-empty, a warning is issued listing
all unused options.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1D array
            The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration or time limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : The HiGHS solver ran into a problem.
message : str
A string descriptor of the exit status of the algorithm.
nit : int
The total number of iterations performed.
For the HiGHS simplex method, this includes iterations in all
phases. For the HiGHS interior-point method, this does not include
crossover iterations.
crossover_nit : int
The number of primal/dual pushes performed during the
crossover routine for the HiGHS interior-point method.
This is ``0`` for the HiGHS simplex method.
ineqlin : OptimizeResult
Solution and sensitivity information corresponding to the
inequality constraints, `b_ub`. A dictionary consisting of the
fields:
            residual : np.ndarray
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``. This quantity is also commonly
referred to as "slack".
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the right-hand side of the
inequality constraints, `b_ub`.
eqlin : OptimizeResult
Solution and sensitivity information corresponding to the
equality constraints, `b_eq`. A dictionary consisting of the
fields:
residual : np.ndarray
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the right-hand side of the
equality constraints, `b_eq`.
lower, upper : OptimizeResult
Solution and sensitivity information corresponding to the
lower and upper bounds on decision variables, `bounds`.
residual : np.ndarray
The (nominally positive) values of the quantity
``x - lb`` (lower) or ``ub - x`` (upper).
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the lower and upper
`bounds`.
Notes
-----
Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
of the C++ high performance dual revised simplex implementation (HSOL)
[13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
between the two automatically. For new code involving `linprog`, we
recommend explicitly choosing one of these three method values instead of
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy).
The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
`marginals`, or partial derivatives of the objective function with respect
to the right-hand side of each constraint. These partial derivatives are
also referred to as "Lagrange multipliers", "dual values", and
"shadow prices". The sign convention of `marginals` is opposite that
of Lagrange multipliers produced by many nonlinear solvers.
References
----------
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
"HiGHS - high performance software for linear optimization."
https://highs.dev/
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
simplex method." Mathematical Programming Computation, 10 (1),
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
.. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
Mathematical programming 5.1 (1973): 1-28.
.. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
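    Examples
    --------
    The sketches below go through the top-level :func:`scipy.optimize.linprog`
    interface with ``method='highs'``; the problem data are illustrative only.
    >>> from scipy.optimize import linprog
    >>> c = [-1, 4]
    >>> A_ub = [[-3, 1], [1, 2]]
    >>> b_ub = [6, 4]
    >>> bounds = [(None, None), (-3, None)]
    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bounds, method='highs')
    >>> res.x
    array([10., -3.])
    >>> res.fun
    -22.0
    A small mixed-integer sketch using the ``integrality`` option described
    above
    >>> res = linprog([-1], A_ub=[[1]], b_ub=[2.5], integrality=[1],
    ...               method='highs')
    >>> res.x
    array([2.])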
"""
pass
def _linprog_highs_ds_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='highs-ds', callback=None,
maxiter=None, disp=False, presolve=True,
time_limit=None,
dual_feasibility_tolerance=None,
primal_feasibility_tolerance=None,
simplex_dual_edge_weight_strategy=None,
**unknown_options):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints using the HiGHS dual simplex solver.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str
This is the method-specific documentation for 'highs-ds'.
:ref:`'highs' <optimize.linprog-highs>`,
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are also available.
Options
-------
maxiter : int
The maximum number of iterations to perform in either phase.
Default is the largest possible value for an ``int`` on the platform.
disp : bool (default: ``False``)
Set to ``True`` if indicators of optimization status are to be
printed to the console during optimization.
presolve : bool (default: ``True``)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver. It is generally recommended
to keep the default setting ``True``; set to ``False`` if
presolve is to be disabled.
time_limit : float
The maximum time in seconds allotted to solve the problem;
default is the largest possible value for a ``double`` on the
platform.
dual_feasibility_tolerance : double (default: 1e-07)
Dual feasibility tolerance for
:ref:`'highs-ds' <optimize.linprog-highs-ds>`.
primal_feasibility_tolerance : double (default: 1e-07)
Primal feasibility tolerance for
:ref:`'highs-ds' <optimize.linprog-highs-ds>`.
simplex_dual_edge_weight_strategy : str (default: None)
Strategy for simplex dual edge weights. The default, ``None``,
automatically selects one of the following.
``'dantzig'`` uses Dantzig's original strategy of choosing the most
negative reduced cost.
``'devex'`` uses the strategy described in [15]_.
        ``'steepest'`` uses the exact steepest edge strategy as described in
[16]_.
``'steepest-devex'`` begins with the exact steepest edge strategy
until the computation is too costly or inexact and then switches to
the devex method.
        Currently, ``None`` always selects ``'steepest-devex'``, but this
may change as new options become available.
unknown_options : dict
Optional arguments not used by this particular solver. If
``unknown_options`` is non-empty, a warning is issued listing
all unused options.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1D array
            The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration or time limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : The HiGHS solver ran into a problem.
message : str
A string descriptor of the exit status of the algorithm.
nit : int
The total number of iterations performed. This includes iterations
in all phases.
crossover_nit : int
This is always ``0`` for the HiGHS simplex method.
For the HiGHS interior-point method, this is the number of
primal/dual pushes performed during the crossover routine.
ineqlin : OptimizeResult
Solution and sensitivity information corresponding to the
inequality constraints, `b_ub`. A dictionary consisting of the
fields:
            residual : np.ndarray
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``. This quantity is also commonly
referred to as "slack".
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the right-hand side of the
inequality constraints, `b_ub`.
eqlin : OptimizeResult
Solution and sensitivity information corresponding to the
equality constraints, `b_eq`. A dictionary consisting of the
fields:
residual : np.ndarray
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the right-hand side of the
equality constraints, `b_eq`.
lower, upper : OptimizeResult
Solution and sensitivity information corresponding to the
lower and upper bounds on decision variables, `bounds`.
residual : np.ndarray
The (nominally positive) values of the quantity
``x - lb`` (lower) or ``ub - x`` (upper).
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the lower and upper
`bounds`.
Notes
-----
Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
of the C++ high performance dual revised simplex implementation (HSOL)
[13]_, [14]_. Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
as a simplex solver. Method :ref:`'highs' <optimize.linprog-highs>` chooses
between the two automatically. For new code involving `linprog`, we
recommend explicitly choosing one of these three method values instead of
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy).
The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
`marginals`, or partial derivatives of the objective function with respect
to the right-hand side of each constraint. These partial derivatives are
also referred to as "Lagrange multipliers", "dual values", and
"shadow prices". The sign convention of `marginals` is opposite that
of Lagrange multipliers produced by many nonlinear solvers.
References
----------
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
"HiGHS - high performance software for linear optimization."
https://highs.dev/
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
simplex method." Mathematical Programming Computation, 10 (1),
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
.. [15] Harris, Paula MJ. "Pivot selection methods of the Devex LP code."
Mathematical programming 5.1 (1973): 1-28.
.. [16] Goldfarb, Donald, and John Ker Reid. "A practicable steepest-edge
simplex algorithm." Mathematical Programming 12.1 (1977): 361-371.
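    Examples
    --------
    A minimal sketch through the top-level :func:`scipy.optimize.linprog`
    interface; the problem data are illustrative only. The ``eqlin.marginals``
    field illustrates the sensitivity information discussed in the Notes.
    >>> from scipy.optimize import linprog
    >>> c = [2, 1]
    >>> A_eq = [[1, 1]]
    >>> b_eq = [1]
    >>> res = linprog(c, A_eq=A_eq, b_eq=b_eq, method='highs-ds')
    >>> res.x
    array([0., 1.])
    >>> res.eqlin.marginals
    array([1.])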
"""
pass
def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='highs-ipm', callback=None,
maxiter=None, disp=False, presolve=True,
time_limit=None,
dual_feasibility_tolerance=None,
primal_feasibility_tolerance=None,
ipm_optimality_tolerance=None,
**unknown_options):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints using the HiGHS interior point solver.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str
This is the method-specific documentation for 'highs-ipm'.
        :ref:`'highs' <optimize.linprog-highs>`,
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are also available.
Options
-------
maxiter : int
The maximum number of iterations to perform in either phase.
For :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`, this does not
include the number of crossover iterations. Default is the largest
possible value for an ``int`` on the platform.
disp : bool (default: ``False``)
Set to ``True`` if indicators of optimization status are to be
printed to the console during optimization.
presolve : bool (default: ``True``)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver. It is generally recommended
to keep the default setting ``True``; set to ``False`` if
presolve is to be disabled.
time_limit : float
The maximum time in seconds allotted to solve the problem;
default is the largest possible value for a ``double`` on the
platform.
dual_feasibility_tolerance : double (default: 1e-07)
The minimum of this and ``primal_feasibility_tolerance``
is used for the feasibility tolerance of
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
primal_feasibility_tolerance : double (default: 1e-07)
The minimum of this and ``dual_feasibility_tolerance``
is used for the feasibility tolerance of
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
ipm_optimality_tolerance : double (default: ``1e-08``)
Optimality tolerance for
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`.
Minimum allowable value is 1e-12.
unknown_options : dict
Optional arguments not used by this particular solver. If
``unknown_options`` is non-empty, a warning is issued listing
all unused options.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1D array
            The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1D array
The (nominally positive) values of the slack,
``b_ub - A_ub @ x``.
con : 1D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration or time limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : The HiGHS solver ran into a problem.
message : str
A string descriptor of the exit status of the algorithm.
nit : int
The total number of iterations performed.
For the HiGHS interior-point method, this does not include
crossover iterations.
crossover_nit : int
The number of primal/dual pushes performed during the
crossover routine for the HiGHS interior-point method.
ineqlin : OptimizeResult
Solution and sensitivity information corresponding to the
inequality constraints, `b_ub`. A dictionary consisting of the
fields:
            residual : np.ndarray
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``. This quantity is also commonly
referred to as "slack".
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the right-hand side of the
inequality constraints, `b_ub`.
eqlin : OptimizeResult
Solution and sensitivity information corresponding to the
equality constraints, `b_eq`. A dictionary consisting of the
fields:
residual : np.ndarray
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the right-hand side of the
equality constraints, `b_eq`.
lower, upper : OptimizeResult
Solution and sensitivity information corresponding to the
lower and upper bounds on decision variables, `bounds`.
residual : np.ndarray
The (nominally positive) values of the quantity
``x - lb`` (lower) or ``ub - x`` (upper).
marginals : np.ndarray
The sensitivity (partial derivative) of the objective
function with respect to the lower and upper
`bounds`.
Notes
-----
Method :ref:`'highs-ipm' <optimize.linprog-highs-ipm>`
is a wrapper of a C++ implementation of an **i**\ nterior-\ **p**\ oint
**m**\ ethod [13]_; it features a crossover routine, so it is as accurate
as a simplex solver.
Method :ref:`'highs-ds' <optimize.linprog-highs-ds>` is a wrapper
of the C++ high performance dual revised simplex implementation (HSOL)
[13]_, [14]_. Method :ref:`'highs' <optimize.linprog-highs>` chooses
between the two automatically. For new code involving `linprog`, we
recommend explicitly choosing one of these three method values instead of
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy).
The result fields `ineqlin`, `eqlin`, `lower`, and `upper` all contain
`marginals`, or partial derivatives of the objective function with respect
to the right-hand side of each constraint. These partial derivatives are
also referred to as "Lagrange multipliers", "dual values", and
"shadow prices". The sign convention of `marginals` is opposite that
of Lagrange multipliers produced by many nonlinear solvers.
References
----------
.. [13] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.
"HiGHS - high performance software for linear optimization."
https://highs.dev/
.. [14] Huangfu, Q. and Hall, J. A. J. "Parallelizing the dual revised
simplex method." Mathematical Programming Computation, 10 (1),
119-142, 2018. DOI: 10.1007/s12532-017-0130-5
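    Examples
    --------
    A minimal sketch through the top-level :func:`scipy.optimize.linprog`
    interface, passing a solver option via the ``options`` dictionary; the
    problem data and tolerance value are illustrative only.
    >>> from scipy.optimize import linprog
    >>> c = [1, 2]
    >>> A_ub = [[-1, -1]]
    >>> b_ub = [-1]
    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='highs-ipm',
    ...               options={'ipm_optimality_tolerance': 1e-10})
    >>> res.x
    array([1., 0.])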
"""
pass
def _linprog_ip_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
bounds=None, method='interior-point', callback=None,
maxiter=1000, disp=False, presolve=True,
tol=1e-8, autoscale=False, rr=True,
alpha0=.99995, beta=0.1, sparse=False,
lstsq=False, sym_pos=True, cholesky=True, pc=True,
ip=False, permc_spec='MMD_AT_PLUS_A', **unknown_options):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints using the interior-point method of
[4]_.
.. deprecated:: 1.9.0
`method='interior-point'` will be removed in SciPy 1.11.0.
It is replaced by `method='highs'` because the latter is
faster and more robust.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str
This is the method-specific documentation for 'interior-point'.
:ref:`'highs' <optimize.linprog-highs>`,
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
:ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
:ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are also available.
callback : callable, optional
Callback function to be executed once per iteration.
Options
-------
maxiter : int (default: 1000)
The maximum number of iterations of the algorithm.
disp : bool (default: False)
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
presolve : bool (default: True)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver. It is generally recommended
to keep the default setting ``True``; set to ``False`` if
presolve is to be disabled.
tol : float (default: 1e-8)
Termination tolerance to be used for all termination criteria;
see [4]_ Section 4.5.
autoscale : bool (default: False)
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
rr : bool (default: True)
Set to ``False`` to disable automatic redundancy removal.
alpha0 : float (default: 0.99995)
        The maximal step size for Mehrotra's predictor-corrector search
direction; see :math:`\beta_{3}` of [4]_ Table 8.1.
beta : float (default: 0.1)
The desired reduction of the path parameter :math:`\mu` (see [6]_)
        when Mehrotra's predictor-corrector is not in use (uncommon).
sparse : bool (default: False)
Set to ``True`` if the problem is to be treated as sparse after
presolve. If either ``A_eq`` or ``A_ub`` is a sparse matrix,
this option will automatically be set ``True``, and the problem
will be treated as sparse even during presolve. If your constraint
matrices contain mostly zeros and the problem is not very small (less
than about 100 constraints or variables), consider setting ``True``
or providing ``A_eq`` and ``A_ub`` as sparse matrices.
lstsq : bool (default: ``False``)
Set to ``True`` if the problem is expected to be very poorly
conditioned. This should always be left ``False`` unless severe
numerical difficulties are encountered. Leave this at the default
unless you receive a warning message suggesting otherwise.
sym_pos : bool (default: True)
Leave ``True`` if the problem is expected to yield a well conditioned
symmetric positive definite normal equation matrix
(almost always). Leave this at the default unless you receive
a warning message suggesting otherwise.
cholesky : bool (default: True)
Set to ``True`` if the normal equations are to be solved by explicit
Cholesky decomposition followed by explicit forward/backward
substitution. This is typically faster for problems
that are numerically well-behaved.
pc : bool (default: True)
        Leave ``True`` if the predictor-corrector method of Mehrotra is to be
used. This is almost always (if not always) beneficial.
ip : bool (default: False)
Set to ``True`` if the improved initial point suggestion due to [4]_
Section 4.3 is desired. Whether this is beneficial or not
depends on the problem.
permc_spec : str (default: 'MMD_AT_PLUS_A')
(Has effect only with ``sparse = True``, ``lstsq = False``, ``sym_pos =
True``, and no SuiteSparse.)
A matrix is factorized in each iteration of the algorithm.
This option specifies how to permute the columns of the matrix for
sparsity preservation. Acceptable values are:
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering.
This option can impact the convergence of the
interior point algorithm; test different values to determine which
performs best for your problem. For more information, refer to
``scipy.sparse.linalg.splu``.
unknown_options : dict
Optional arguments not used by this particular solver. If
`unknown_options` is non-empty a warning is issued listing all
unused options.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
            The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
message : str
A string descriptor of the exit status of the algorithm.
nit : int
The total number of iterations performed in all phases.
Notes
-----
This method implements the algorithm outlined in [4]_ with ideas from [8]_
and a structure inspired by the simpler methods of [6]_.
The primal-dual path following method begins with initial 'guesses' of
the primal and dual variables of the standard form problem and iteratively
attempts to solve the (nonlinear) Karush-Kuhn-Tucker conditions for the
problem with a gradually reduced logarithmic barrier term added to the
objective. This particular implementation uses a homogeneous self-dual
formulation, which provides certificates of infeasibility or unboundedness
where applicable.
The default initial point for the primal and dual variables is that
defined in [4]_ Section 4.4 Equation 8.22. Optionally (by setting initial
point option ``ip=True``), an alternate (potentially improved) starting
point can be calculated according to the additional recommendations of
[4]_ Section 4.4.
A search direction is calculated using the predictor-corrector method
    (single correction) proposed by Mehrotra and detailed in [4]_ Section 4.1.
(A potential improvement would be to implement the method of multiple
corrections described in [4]_ Section 4.2.) In practice, this is
accomplished by solving the normal equations, [4]_ Section 5.1 Equations
8.31 and 8.32, derived from the Newton equations [4]_ Section 5 Equations
8.25 (compare to [4]_ Section 4 Equations 8.6-8.8). The advantage of
solving the normal equations rather than 8.25 directly is that the
matrices involved are symmetric positive definite, so Cholesky
decomposition can be used rather than the more expensive LU factorization.
With default options, the solver used to perform the factorization depends
on third-party software availability and the conditioning of the problem.
For dense problems, solvers are tried in the following order:
1. ``scipy.linalg.cho_factor``
2. ``scipy.linalg.solve`` with option ``sym_pos=True``
3. ``scipy.linalg.solve`` with option ``sym_pos=False``
4. ``scipy.linalg.lstsq``
For sparse problems:
1. ``sksparse.cholmod.cholesky`` (if scikit-sparse and SuiteSparse are
installed)
2. ``scipy.sparse.linalg.factorized`` (if scikit-umfpack and SuiteSparse
are installed)
3. ``scipy.sparse.linalg.splu`` (which uses SuperLU distributed with SciPy)
4. ``scipy.sparse.linalg.lsqr``
If the solver fails for any reason, successively more robust (but slower)
solvers are attempted in the order indicated. Attempting, failing, and
re-starting factorization can be time consuming, so if the problem is
numerically challenging, options can be set to bypass solvers that are
failing. Setting ``cholesky=False`` skips to solver 2,
``sym_pos=False`` skips to solver 3, and ``lstsq=True`` skips
to solver 4 for both sparse and dense problems.
Potential improvements for combatting issues associated with dense
columns in otherwise sparse problems are outlined in [4]_ Section 5.3 and
[10]_ Section 4.1-4.2; the latter also discusses the alleviation of
accuracy issues associated with the substitution approach to free
variables.
After calculating the search direction, the maximum possible step size
that does not activate the non-negativity constraints is calculated, and
the smaller of this step size and unity is applied (as in [4]_ Section
4.1.) [4]_ Section 4.3 suggests improvements for choosing the step size.
The new point is tested according to the termination conditions of [4]_
Section 4.5. The same tolerance, which can be set using the ``tol`` option,
is used for all checks. (A potential improvement would be to expose
the different tolerances to be set independently.) If optimality,
unboundedness, or infeasibility is detected, the solve procedure
terminates; otherwise it repeats.
Whereas the top level ``linprog`` module expects a problem of form:
Minimize::
c @ x
Subject to::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
where ``lb = 0`` and ``ub = None`` unless set in ``bounds``. The problem
is automatically converted to the form:
Minimize::
c @ x
Subject to::
A @ x == b
x >= 0
for solution. That is, the original problem contains equality, upper-bound
and variable constraints whereas the method specific solver requires
equality constraints and variable non-negativity. ``linprog`` converts the
original problem to standard form by converting the simple bounds to upper
bound constraints, introducing non-negative slack variables for inequality
constraints, and expressing unbounded variables as the difference between
two non-negative variables. The problem is converted back to the original
form before results are reported.
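    For example, a single free variable ``x`` subject to ``x <= 3`` becomes
    ``x = xp - xn`` with the equality constraint ``xp - xn + s == 3`` and
    ``xp, xn, s >= 0``, where ``s`` is the slack variable introduced for the
    inequality.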
References
----------
.. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
optimizer for linear programming: an implementation of the
homogeneous algorithm." High performance optimization. Springer US,
2000. 197-232.
.. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
Programming based on Newton's Method." Unpublished Course Notes,
March 2004. Available 2/25/2017 at
https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
.. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
programming." Mathematical Programming 71.2 (1995): 221-245.
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [10] Andersen, Erling D., et al. Implementation of interior point
methods for large scale linear programming. HEC/Universite de
Geneve, 1996.
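    Examples
    --------
    A minimal sketch through the top-level :func:`scipy.optimize.linprog`
    interface (note the deprecation above; prefer ``method='highs'`` for new
    code). The problem data and option values are illustrative only.
    >>> from scipy.optimize import linprog
    >>> c = [2, 3]
    >>> A_eq = [[1, 1]]
    >>> b_eq = [2]
    >>> res = linprog(c, A_eq=A_eq, b_eq=b_eq, method='interior-point',
    ...               options={'tol': 1e-9, 'presolve': True})
    >>> res.status
    0
    >>> res.success
    True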
"""
pass
def _linprog_rs_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
                    bounds=None, method='revised simplex', callback=None,
x0=None, maxiter=5000, disp=False, presolve=True,
tol=1e-12, autoscale=False, rr=True, maxupdate=10,
mast=False, pivot="mrc", **unknown_options):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints using the revised simplex method.
.. deprecated:: 1.9.0
`method='revised simplex'` will be removed in SciPy 1.11.0.
It is replaced by `method='highs'` because the latter is
faster and more robust.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str
This is the method-specific documentation for 'revised simplex'.
:ref:`'highs' <optimize.linprog-highs>`,
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
and :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
are also available.
callback : callable, optional
Callback function to be executed once per iteration.
x0 : 1-D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
Options
-------
maxiter : int (default: 5000)
The maximum number of iterations to perform in either phase.
disp : bool (default: False)
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
presolve : bool (default: True)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver. It is generally recommended
to keep the default setting ``True``; set to ``False`` if
presolve is to be disabled.
tol : float (default: 1e-12)
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
autoscale : bool (default: False)
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
rr : bool (default: True)
Set to ``False`` to disable automatic redundancy removal.
maxupdate : int (default: 10)
The maximum number of updates performed on the LU factorization.
        Once this many updates have been performed, the basis matrix is
        factorized from scratch.
mast : bool (default: False)
Minimize Amortized Solve Time. If enabled, the average time to solve
a linear system using the basis factorization is measured. Typically,
the average solve time will decrease with each successive solve after
initial factorization, as factorization takes much more time than the
solve operation (and updates). Eventually, however, the updated
factorization becomes sufficiently complex that the average solve time
begins to increase. When this is detected, the basis is refactorized
from scratch. Enable this option to maximize speed at the risk of
nondeterministic behavior. Ignored if ``maxupdate`` is 0.
pivot : "mrc" or "bland" (default: "mrc")
Pivot rule: Minimum Reduced Cost ("mrc") or Bland's rule ("bland").
Choose Bland's rule if iteration limit is reached and cycling is
suspected.
unknown_options : dict
Optional arguments not used by this particular solver. If
`unknown_options` is non-empty a warning is issued listing all
unused options.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
            The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
``5`` : Problem has no constraints; turn presolve on.
``6`` : Invalid guess provided.
message : str
A string descriptor of the exit status of the algorithm.
nit : int
The total number of iterations performed in all phases.
Notes
-----
Method *revised simplex* uses the revised simplex method as described in
[9]_, except that a factorization [11]_ of the basis matrix, rather than
its inverse, is efficiently maintained and used to solve the linear systems
at each iteration of the algorithm.
References
----------
.. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Numerische Mathematik 16.5 (1971): 414-434.
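    Examples
    --------
    A minimal sketch through the top-level :func:`scipy.optimize.linprog`
    interface (note the deprecation above; prefer ``method='highs'`` for new
    code). The problem data are illustrative only.
    >>> from scipy.optimize import linprog
    >>> c = [1, 1]
    >>> A_ub = [[-1, -2]]
    >>> b_ub = [-4]
    >>> res = linprog(c, A_ub=A_ub, b_ub=b_ub, method='revised simplex')
    >>> res.x
    array([0., 2.])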
"""
pass
def _linprog_simplex_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
                         bounds=None, method='simplex', callback=None,
maxiter=5000, disp=False, presolve=True,
tol=1e-12, autoscale=False, rr=True, bland=False,
**unknown_options):
r"""
Linear programming: minimize a linear objective function subject to linear
equality and inequality constraints using the tableau-based simplex method.
.. deprecated:: 1.9.0
`method='simplex'` will be removed in SciPy 1.11.0.
It is replaced by `method='highs'` because the latter is
faster and more robust.
Linear programming solves problems of the following form:
.. math::
\min_x \ & c^T x \\
\mbox{such that} \ & A_{ub} x \leq b_{ub},\\
& A_{eq} x = b_{eq},\\
& l \leq x \leq u ,
where :math:`x` is a vector of decision variables; :math:`c`,
:math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
:math:`A_{ub}` and :math:`A_{eq}` are matrices.
Alternatively, that's:
minimize::
c @ x
such that::
A_ub @ x <= b_ub
A_eq @ x == b_eq
lb <= x <= ub
Note that by default ``lb = 0`` and ``ub = None`` unless specified with
``bounds``.
Parameters
----------
c : 1-D array
The coefficients of the linear objective function to be minimized.
A_ub : 2-D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1-D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2-D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1-D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None``
to indicate that there is no bound. By default, bounds are
``(0, None)`` (all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
method : str
This is the method-specific documentation for 'simplex'.
:ref:`'highs' <optimize.linprog-highs>`,
:ref:`'highs-ds' <optimize.linprog-highs-ds>`,
:ref:`'highs-ipm' <optimize.linprog-highs-ipm>`,
:ref:`'interior-point' <optimize.linprog-interior-point>` (default),
and :ref:`'revised simplex' <optimize.linprog-revised_simplex>`
are also available.
callback : callable, optional
Callback function to be executed once per iteration.
Options
-------
maxiter : int (default: 5000)
The maximum number of iterations to perform in either phase.
disp : bool (default: False)
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
presolve : bool (default: True)
Presolve attempts to identify trivial infeasibilities,
identify trivial unboundedness, and simplify the problem before
sending it to the main solver. It is generally recommended
to keep the default setting ``True``; set to ``False`` if
presolve is to be disabled.
tol : float (default: 1e-12)
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
autoscale : bool (default: False)
Set to ``True`` to automatically perform equilibration.
Consider using this option if the numerical values in the
constraints are separated by several orders of magnitude.
rr : bool (default: True)
Set to ``False`` to disable automatic redundancy removal.
bland : bool
If True, use Bland's anti-cycling rule [3]_ to choose pivots to
prevent cycling. If False, choose pivots which should lead to a
converged solution more quickly. The latter method is subject to
cycling (non-convergence) in rare instances.
unknown_options : dict
Optional arguments not used by this particular solver. If
`unknown_options` is non-empty a warning is issued listing all
unused options.
Returns
-------
res : OptimizeResult
A :class:`scipy.optimize.OptimizeResult` consisting of the fields:
x : 1-D array
The values of the decision variables that minimize the
objective function while satisfying the constraints.
fun : float
The optimal value of the objective function ``c @ x``.
slack : 1-D array
The (nominally positive) values of the slack variables,
``b_ub - A_ub @ x``.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
``b_eq - A_eq @ x``.
success : bool
``True`` when the algorithm succeeds in finding an optimal
solution.
status : int
An integer representing the exit status of the algorithm.
``0`` : Optimization terminated successfully.
``1`` : Iteration limit reached.
``2`` : Problem appears to be infeasible.
``3`` : Problem appears to be unbounded.
``4`` : Numerical difficulties encountered.
message : str
A string descriptor of the exit status of the algorithm.
nit : int
The total number of iterations performed in all phases.
References
----------
.. [1] Dantzig, George B., Linear programming and extensions. Rand
Corporation Research Study Princeton Univ. Press, Princeton, NJ,
1963
.. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
Mathematical Programming", McGraw-Hill, Chapter 4.
.. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
Mathematics of Operations Research (2), 1977: pp. 103-107.
"""
pass
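# A minimal end-to-end sketch of the formulation documented above,
# minimize c @ x subject to A_ub @ x <= b_ub, A_eq @ x == b_eq and x >= 0
# (the default bounds), intended to be run on its own. The numbers are
# illustrative; per the deprecation note above, 'highs' is the recommended
# replacement for 'simplex'.
from scipy.optimize import linprog
res = linprog(c=[1.0, 2.0],
              A_ub=[[1.0, -1.0]], b_ub=[0.5],
              A_eq=[[1.0, 1.0]], b_eq=[1.0],
              method='highs')
# Expected solution: x is approximately (0.75, 0.25), fun approximately 1.25.
print(res.x, res.fun)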
| bsd-3-clause | dc654454a971be029d40d9eff6bbe7c5 | 42.182578 | 143 | 0.629351 | 4.172581 | false | false | false | false |
scipy/scipy | scipy/special/_orthogonal.py | 1 | 73921 | """
A collection of functions to find the weights and abscissas for
Gaussian Quadrature.
These calculations are done by finding the eigenvalues of a
tridiagonal matrix whose entries are dependent on the coefficients
in the recursion formula for the orthogonal polynomials with the
corresponding weighting function over the interval.
Many recursion relations for orthogonal polynomials are given:
.. math::
a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x)
The recursion relation of interest is
.. math::
P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)
where :math:`P` has a different normalization than :math:`f`.
The coefficients can be found as:
.. math::
A_n = -a2n / a3n
\\qquad
B_n = ( a4n / a3n \\sqrt{h_{n-1} / h_n})^2
where
.. math::
h_n = \\int_a^b w(x) f_n(x)^2
assume:
.. math::
P_0 (x) = 1
\\qquad
P_{-1} (x) == 0
For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
[abramowitz.stegun-1965]_.
References
----------
.. [golub.welsch-1969-mathcomp]
Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
.. [abramowitz.stegun-1965]
Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
Mathematical Functions: with Formulas, Graphs, and Mathematical
Tables*. Gaithersburg, MD: National Bureau of Standards.
http://www.math.sfu.ca/~cbm/aands/
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
"""
#
# Author: Travis Oliphant 2000
# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
# SciPy imports.
import numpy as np
from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around,
hstack, arccos, arange)
from scipy import linalg
from scipy.special import airy
# Local imports.
from . import _ufuncs
_gam = _ufuncs.gamma
# There is no .pyi file for _specfun
from . import _specfun # type: ignore
_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
'jacobi', 'laguerre', 'genlaguerre', 'hermite',
'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
'sh_chebyu', 'sh_jacobi']
# Correspondence between new and old names of root functions
_rootfuns_map = {'roots_legendre': 'p_roots',
'roots_chebyt': 't_roots',
'roots_chebyu': 'u_roots',
'roots_chebyc': 'c_roots',
'roots_chebys': 's_roots',
'roots_jacobi': 'j_roots',
'roots_laguerre': 'l_roots',
'roots_genlaguerre': 'la_roots',
'roots_hermite': 'h_roots',
'roots_hermitenorm': 'he_roots',
'roots_gegenbauer': 'cg_roots',
'roots_sh_legendre': 'ps_roots',
'roots_sh_chebyt': 'ts_roots',
'roots_sh_chebyu': 'us_roots',
'roots_sh_jacobi': 'js_roots'}
__all__ = _polyfuns + list(_rootfuns_map.keys())
class orthopoly1d(np.poly1d):
def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
limits=None, monic=False, eval_func=None):
equiv_weights = [weights[k] / wfunc(roots[k]) for
k in range(len(roots))]
mu = sqrt(hn)
if monic:
evf = eval_func
if evf:
knn = kn
eval_func = lambda x: evf(x) / knn
mu = mu / abs(kn)
kn = 1.0
# compute coefficients from roots, then scale
poly = np.poly1d(roots, r=True)
np.poly1d.__init__(self, poly.coeffs * float(kn))
self.weights = np.array(list(zip(roots, weights, equiv_weights)))
self.weight_func = wfunc
self.limits = limits
self.normcoef = mu
# Note: eval_func will be discarded on arithmetic
self._eval_func = eval_func
def __call__(self, v):
if self._eval_func and not isinstance(v, np.poly1d):
return self._eval_func(v)
else:
return np.poly1d.__call__(self, v)
def _scale(self, p):
if p == 1.0:
return
self._coeffs *= p
evf = self._eval_func
if evf:
self._eval_func = lambda x: evf(x) * p
self.normcoef *= p
def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
"""[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)
Returns the roots (x) of an nth order orthogonal polynomial,
and weights (w) to use in appropriate Gaussian quadrature with that
orthogonal polynomial.
The polynomials have the recurrence relation
P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)
an_func(k) should return A_k
bn_func(k) should return sqrt(B_k)
mu0 ( = h_0 ) is the integral of the weight over the orthogonal
interval; it is returned alongside x and w when mu is True
"""
k = np.arange(n, dtype='d')
c = np.zeros((2, n))
c[0,1:] = bn_func(k[1:])
c[1,:] = an_func(k)
x = linalg.eigvals_banded(c, overwrite_a_band=True)
# improve roots by one application of Newton's method
y = f(n, x)
dy = df(n, x)
x -= y/dy
# fm and dy may contain very large/small values, so we
# log-normalize them to maintain precision in the product fm*dy
fm = f(n-1, x)
log_fm = np.log(np.abs(fm))
log_dy = np.log(np.abs(dy))
fm /= np.exp((log_fm.max() + log_fm.min()) / 2.)
dy /= np.exp((log_dy.max() + log_dy.min()) / 2.)
w = 1.0 / (fm * dy)
if symmetrize:
w = (w + w[::-1]) / 2
x = (x - x[::-1]) / 2
w *= mu0 / w.sum()
if mu:
return x, w, mu0
else:
return x, w
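# A minimal sketch of the Golub-Welsch idea implemented above, specialized to
# Legendre polynomials, whose monic recurrence has A_k = 0 and
# B_k = k**2 / (4*k**2 - 1): the quadrature nodes are the eigenvalues of the
# symmetric tridiagonal Jacobi matrix with zero diagonal and off-diagonal
# entries sqrt(B_k). The temporary names and the comparison against numpy's
# leggauss are illustrative only.
import numpy as np
_n_demo = 5
_k_demo = np.arange(1, _n_demo)
_offdiag_demo = _k_demo / np.sqrt(4.0 * _k_demo**2 - 1.0)         # sqrt(B_k)
_J_demo = np.diag(_offdiag_demo, 1) + np.diag(_offdiag_demo, -1)  # zero diagonal
assert np.allclose(np.linalg.eigvalsh(_J_demo),
                   np.polynomial.legendre.leggauss(_n_demo)[0])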
# Jacobi Polynomials 1 P^(alpha,beta)_n(x)
def roots_jacobi(n, alpha, beta, mu=False):
r"""Gauss-Jacobi quadrature.
Compute the sample points and weights for Gauss-Jacobi
quadrature. The sample points are the roots of the nth degree
Jacobi polynomial, :math:`P^{\alpha, \beta}_n(x)`. These sample
points and weights correctly integrate polynomials of degree
:math:`2n - 1` or less over the interval :math:`[-1, 1]` with
weight function :math:`w(x) = (1 - x)^{\alpha} (1 +
x)^{\beta}`. See 22.2.1 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
alpha : float
alpha must be > -1
beta : float
beta must be > -1
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
if alpha <= -1 or beta <= -1:
raise ValueError("alpha and beta must be greater than -1.")
if alpha == 0.0 and beta == 0.0:
return roots_legendre(m, mu)
if alpha == beta:
return roots_gegenbauer(m, alpha+0.5, mu)
if (alpha + beta) <= 1000:
mu0 = 2.0**(alpha+beta+1) * _ufuncs.beta(alpha+1, beta+1)
else:
# Avoid overflows in pow and beta for very large parameters
mu0 = np.exp((alpha + beta + 1) * np.log(2.0)
+ _ufuncs.betaln(alpha+1, beta+1))
a = alpha
b = beta
if a + b == 0.0:
an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b), 0.0)
else:
an_func = lambda k: np.where(k == 0, (b-a)/(2+a+b),
(b*b - a*a) / ((2.0*k+a+b)*(2.0*k+a+b+2)))
bn_func = lambda k: 2.0 / (2.0*k+a+b)*np.sqrt((k+a)*(k+b) / (2*k+a+b+1)) \
* np.where(k == 1, 1.0, np.sqrt(k*(k+a+b) / (2.0*k+a+b-1)))
f = lambda n, x: _ufuncs.eval_jacobi(n, a, b, x)
df = lambda n, x: (0.5 * (n + a + b + 1)
* _ufuncs.eval_jacobi(n-1, a+1, b+1, x))
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
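# Illustrative check of the exactness property stated in the docstring above:
# a 2-point Gauss-Jacobi rule with alpha = 1, beta = 0 integrates any cubic
# exactly against the weight (1 - x); for f(x) = x**3 the exact value is
# int_{-1}^{1} (1 - x) * x**3 dx = -2/5. Temporary names are illustrative only.
import numpy as np
_xj_demo, _wj_demo = roots_jacobi(2, 1.0, 0.0)
assert np.allclose(np.sum(_wj_demo * _xj_demo**3), -2.0 / 5.0)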
def jacobi(n, alpha, beta, monic=False):
r"""Jacobi polynomial.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)}
+ (\beta - \alpha - (\alpha + \beta + 2)x)
\frac{d}{dx}P_n^{(\alpha, \beta)}
+ n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0
for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a
polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
alpha : float
Parameter, must be greater than -1.
beta : float
Parameter, must be greater than -1.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
P : orthopoly1d
Jacobi polynomial.
Notes
-----
For fixed :math:`\alpha, \beta`, the polynomials
:math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]`
with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The Jacobi polynomials satisfy the recurrence relation:
.. math::
P_n^{(\alpha, \beta-1)}(x) - P_n^{(\alpha-1, \beta)}(x)
= P_{n-1}^{(\alpha, \beta)}(x)
This can be verified, for example, for :math:`\alpha = \beta = 2`
and :math:`n = 1` over the interval :math:`[-1, 1]`:
>>> import numpy as np
>>> from scipy.special import jacobi
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> np.allclose(jacobi(0, 2, 2)(x),
... jacobi(1, 2, 1)(x) - jacobi(1, 1, 2)(x))
True
Plot of the Jacobi polynomial :math:`P_5^{(\alpha, -0.5)}` for
different values of :math:`\alpha`:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-2.0, 2.0)
>>> ax.set_title(r'Jacobi polynomials $P_5^{(\alpha, -0.5)}$')
>>> for alpha in np.arange(0, 4, 1):
... ax.plot(x, jacobi(5, alpha, -0.5)(x), label=rf'$\alpha={alpha}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: (1 - x)**alpha * (1 + x)**beta
if n == 0:
return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
eval_func=np.ones_like)
x, w, mu = roots_jacobi(n, alpha, beta, mu=True)
ab1 = alpha + beta + 1.0
hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
# here kn = coefficient on x^n term
p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
lambda x: _ufuncs.eval_jacobi(n, alpha, beta, x))
return p
# Jacobi Polynomials shifted G_n(p,q,x)
def roots_sh_jacobi(n, p1, q1, mu=False):
"""Gauss-Jacobi (shifted) quadrature.
Compute the sample points and weights for Gauss-Jacobi (shifted)
quadrature. The sample points are the roots of the nth degree
shifted Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample
points and weights correctly integrate polynomials of degree
:math:`2n - 1` or less over the interval :math:`[0, 1]` with
weight function :math:`w(x) = (1 - x)^{p-q} x^{q-1}`. See 22.2.2
in [AS]_ for details.
Parameters
----------
n : int
quadrature order
p1 : float
(p1 - q1) must be > -1
q1 : float
q1 must be > 0
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
if (p1-q1) <= -1 or q1 <= 0:
raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
x, w, m = roots_jacobi(n, p1-q1, q1-1, True)
x = (x + 1) / 2
scale = 2.0**p1
w /= scale
m /= scale
if mu:
return x, w, m
else:
return x, w
def sh_jacobi(n, p, q, monic=False):
r"""Shifted Jacobi polynomial.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial.
p : float
Parameter, must have :math:`p > q - 1`.
q : float
Parameter, must be greater than 0.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
G : orthopoly1d
Shifted Jacobi polynomial.
Notes
-----
For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are
orthogonal over :math:`[0, 1]` with weight function :math:`(1 -
x)^{p - q}x^{q - 1}`.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: (1.0 - x)**(p - q) * (x)**(q - 1.)
if n == 0:
return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
eval_func=np.ones_like)
n1 = n
x, w = roots_sh_jacobi(n1, p, q)
hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
hn /= (2 * n + p) * (_gam(2 * n + p)**2)
# kn = 1.0 in standard form so monic is redundant. Kept for compatibility.
kn = 1.0
pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,
eval_func=lambda x: _ufuncs.eval_sh_jacobi(n, p, q, x))
return pp
# Generalized Laguerre L^(alpha)_n(x)
def roots_genlaguerre(n, alpha, mu=False):
r"""Gauss-generalized Laguerre quadrature.
Compute the sample points and weights for Gauss-generalized
Laguerre quadrature. The sample points are the roots of the nth
degree generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`.
These sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[0,
\infty]` with weight function :math:`w(x) = x^{\alpha}
e^{-x}`. See 22.3.9 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
alpha : float
alpha must be > -1
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
if alpha <= -1:
raise ValueError("alpha must be greater than -1.")
mu0 = _ufuncs.gamma(alpha + 1)
if m == 1:
x = np.array([alpha+1.0], 'd')
w = np.array([mu0], 'd')
if mu:
return x, w, mu0
else:
return x, w
an_func = lambda k: 2 * k + alpha + 1
bn_func = lambda k: -np.sqrt(k * (k + alpha))
f = lambda n, x: _ufuncs.eval_genlaguerre(n, alpha, x)
df = lambda n, x: (n*_ufuncs.eval_genlaguerre(n, alpha, x)
- (n + alpha)*_ufuncs.eval_genlaguerre(n-1, alpha, x))/x
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
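# Illustrative check of the property documented above: an n-point rule is exact
# for polynomials of degree <= 2n - 1 against the weight x**alpha * exp(-x).
# With alpha = 1 and f(x) = x**2 the exact integral is Gamma(alpha + 3) = 6.
import numpy as np
_xgl_demo, _wgl_demo = roots_genlaguerre(2, 1.0)
assert np.allclose(np.sum(_wgl_demo * _xgl_demo**2), 6.0)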
def genlaguerre(n, alpha, monic=False):
r"""Generalized (associated) Laguerre polynomial.
Defined to be the solution of
.. math::
x\frac{d^2}{dx^2}L_n^{(\alpha)}
+ (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)}
+ nL_n^{(\alpha)} = 0,
where :math:`\alpha > -1`; :math:`L_n^{(\alpha)}` is a polynomial
of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
alpha : float
Parameter, must be greater than -1.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
L : orthopoly1d
Generalized Laguerre polynomial.
Notes
-----
For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}`
are orthogonal over :math:`[0, \infty)` with weight function
:math:`e^{-x}x^\alpha`.
The Laguerre polynomials are the special case where :math:`\alpha
= 0`.
See Also
--------
laguerre : Laguerre polynomial.
hyp1f1 : confluent hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The generalized Laguerre polynomials are closely related to the confluent
hypergeometric function :math:`{}_1F_1`:
.. math::
L_n^{(\alpha)} = \binom{n + \alpha}{n} {}_1F_1(-n, \alpha +1, x)
This can be verified, for example, for :math:`n = \alpha = 3` over the
interval :math:`[-1, 1]`:
>>> import numpy as np
>>> from scipy.special import binom
>>> from scipy.special import genlaguerre
>>> from scipy.special import hyp1f1
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> np.allclose(genlaguerre(3, 3)(x), binom(6, 3) * hyp1f1(-3, 4, x))
True
This is the plot of the generalized Laguerre polynomials
:math:`L_3^{(\alpha)}` for some values of :math:`\alpha`:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(-4.0, 12.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-5.0, 10.0)
>>> ax.set_title(r'Generalized Laguerre polynomials $L_3^{\alpha}$')
>>> for alpha in np.arange(0, 5):
... ax.plot(x, genlaguerre(3, alpha)(x), label=rf'$L_3^{(alpha)}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if alpha <= -1:
raise ValueError("alpha must be > -1")
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_genlaguerre(n1, alpha)
wfunc = lambda x: exp(-x) * x**alpha
if n == 0:
x, w = [], []
hn = _gam(n + alpha + 1) / _gam(n + 1)
kn = (-1)**n / _gam(n + 1)
p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,
lambda x: _ufuncs.eval_genlaguerre(n, alpha, x))
return p
# Laguerre L_n(x)
def roots_laguerre(n, mu=False):
r"""Gauss-Laguerre quadrature.
Compute the sample points and weights for Gauss-Laguerre
quadrature. The sample points are the roots of the nth degree
Laguerre polynomial, :math:`L_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[0, \infty]` with weight function
:math:`w(x) = e^{-x}`. See 22.2.13 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.laguerre.laggauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
return roots_genlaguerre(n, 0.0, mu=mu)
def laguerre(n, monic=False):
r"""Laguerre polynomial.
Defined to be the solution of
.. math::
x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0;
:math:`L_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
L : orthopoly1d
Laguerre Polynomial.
Notes
-----
The polynomials :math:`L_n` are orthogonal over :math:`[0,
\infty)` with weight function :math:`e^{-x}`.
See Also
--------
genlaguerre : Generalized (associated) Laguerre polynomial.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The Laguerre polynomials :math:`L_n` are the special case
:math:`\alpha = 0` of the generalized Laguerre polynomials
:math:`L_n^{(\alpha)}`.
Let's verify it on the interval :math:`[-1, 1]`:
>>> import numpy as np
>>> from scipy.special import genlaguerre
>>> from scipy.special import laguerre
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> np.allclose(genlaguerre(3, 0)(x), laguerre(3)(x))
True
The polynomials :math:`L_n` also satisfy the recurrence relation:
.. math::
(n + 1)L_{n+1}(x) = (2n +1 -x)L_n(x) - nL_{n-1}(x)
This can be easily checked on :math:`[0, 1]` for :math:`n = 3`:
>>> x = np.arange(0.0, 1.0, 0.01)
>>> np.allclose(4 * laguerre(4)(x),
... (7 - x) * laguerre(3)(x) - 3 * laguerre(2)(x))
True
This is the plot of the first few Laguerre polynomials :math:`L_n`:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(-1.0, 5.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-5.0, 5.0)
>>> ax.set_title(r'Laguerre polynomials $L_n$')
>>> for n in np.arange(0, 5):
... ax.plot(x, laguerre(n)(x), label=rf'$L_{n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_laguerre(n1)
if n == 0:
x, w = [], []
hn = 1.0
kn = (-1)**n / _gam(n + 1)
p = orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
lambda x: _ufuncs.eval_laguerre(n, x))
return p
# Hermite 1 H_n(x)
def roots_hermite(n, mu=False):
r"""Gauss-Hermite (physicist's) quadrature.
Compute the sample points and weights for Gauss-Hermite
quadrature. The sample points are the roots of the nth degree
Hermite polynomial, :math:`H_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[-\infty, \infty]` with weight
function :math:`w(x) = e^{-x^2}`. See 22.2.14 in [AS]_ for
details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
Notes
-----
For small n up to 150 a modified version of the Golub-Welsch
algorithm is used. Nodes are computed from the eigenvalue
problem and improved by one step of a Newton iteration.
The weights are computed from the well-known analytical formula.
For n larger than 150 an optimal asymptotic algorithm is applied
which computes nodes and weights in a numerically stable manner.
The algorithm has linear runtime making computation for very
large n (several thousand or more) feasible.
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.hermite.hermgauss
roots_hermitenorm
References
----------
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
mu0 = np.sqrt(np.pi)
if n <= 150:
an_func = lambda k: 0.0*k
bn_func = lambda k: np.sqrt(k/2.0)
f = _ufuncs.eval_hermite
df = lambda n, x: 2.0 * n * _ufuncs.eval_hermite(n-1, x)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
else:
nodes, weights = _roots_hermite_asy(m)
if mu:
return nodes, weights, mu0
else:
return nodes, weights
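# Illustrative check of the exactness property documented above for the weight
# exp(-x**2): a 2-point Gauss-Hermite rule integrates f(x) = x**2 exactly, and
# int_{-inf}^{inf} x**2 * exp(-x**2) dx = sqrt(pi) / 2.
import numpy as np
_xh_demo, _wh_demo = roots_hermite(2)
assert np.allclose(np.sum(_wh_demo * _xh_demo**2), np.sqrt(np.pi) / 2)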
def _compute_tauk(n, k, maxit=5):
"""Helper function for Tricomi initial guesses
For details, see formula 3.1 in lemma 3.1 in the
original paper.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots :math:`\tau_k` to compute
maxit : int
Maximum number of Newton iterations performed; the default
value of 5 is sufficient.
Returns
-------
tauk : ndarray
Roots of equation 3.1
See Also
--------
initial_nodes_a
roots_hermite_asy
"""
a = n % 2 - 0.5
c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0)
f = lambda x: x - sin(x) - c
df = lambda x: 1.0 - cos(x)
xi = 0.5*pi
for i in range(maxit):
xi = xi - f(xi)/df(xi)
return xi
def _initial_nodes_a(n, k):
r"""Tricomi initial guesses
Computes an initial approximation to the square of the `k`-th
(positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The formula is the one from lemma 3.1 in the
original paper. The guesses are accurate except in the region
near :math:`\sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots to compute
Returns
-------
xksq : ndarray
Square of the approximate roots
See Also
--------
initial_nodes
roots_hermite_asy
"""
tauk = _compute_tauk(n, k)
sigk = cos(0.5*tauk)**2
a = n % 2 - 0.5
nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
# Initial approximation of Hermite roots (square)
xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25)
return xksq
def _initial_nodes_b(n, k):
r"""Gatteschi initial guesses
Computes an initial approximation to the square of the kth
(positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The formula is the one from lemma 3.2 in the
original paper. The guesses are accurate in the region just
below :math:`\sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots to compute
Returns
-------
xksq : ndarray
Square of the approximate root
See Also
--------
initial_nodes
roots_hermite_asy
"""
a = n % 2 - 0.5
nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
# Airy roots by approximation
ak = _specfun.airyzo(k.max(), 1)[0][::-1]
# Initial approximation of Hermite roots (square)
xksq = (nu +
2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +
1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +
(9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +
(16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -
(15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))
return xksq
def _initial_nodes(n):
"""Initial guesses for the Hermite roots
Computes an initial approximation to the non-negative
roots :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The Tricomi and Gatteschi initial
guesses are used in the region where they are accurate.
Parameters
----------
n : int
Quadrature order
Returns
-------
xk : ndarray
Approximate roots
See Also
--------
roots_hermite_asy
"""
# Turnover point
# linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules
fit = 0.49082003*n - 4.37859653
turnover = around(fit).astype(int)
# Compute all approximations
ia = arange(1, int(floor(n*0.5)+1))
ib = ia[::-1]
xasq = _initial_nodes_a(n, ia[:turnover+1])
xbsq = _initial_nodes_b(n, ib[turnover+1:])
# Combine
iv = sqrt(hstack([xasq, xbsq]))
# Central node is always zero
if n % 2 == 1:
iv = hstack([0.0, iv])
return iv
def _pbcf(n, theta):
r"""Asymptotic series expansion of parabolic cylinder function
The implementation is based on sections 3.2 and 3.3 from the
original paper. Compared to the published version this code
adds one more term to the asymptotic series. The detailed
formulas can be found at [parabolic-asymptotics]_. The evaluation
is done in a transformed variable :math:`\theta := \arccos(t)`
where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
theta : ndarray
Transformed position variable
Returns
-------
U : ndarray
Value of the parabolic cylinder function :math:`U(a, \theta)`.
Ud : ndarray
Value of the derivative :math:`U^{\prime}(a, \theta)` of
the parabolic cylinder function.
See Also
--------
roots_hermite_asy
References
----------
.. [parabolic-asymptotics]
https://dlmf.nist.gov/12.10#vii
"""
st = sin(theta)
ct = cos(theta)
# https://dlmf.nist.gov/12.10#vii
mu = 2.0*n + 1.0
# https://dlmf.nist.gov/12.10#E23
eta = 0.5*theta - 0.5*st*ct
# https://dlmf.nist.gov/12.10#E39
zeta = -(3.0*eta/2.0) ** (2.0/3.0)
# https://dlmf.nist.gov/12.10#E40
phi = (-zeta / st**2) ** (0.25)
# Coefficients
# https://dlmf.nist.gov/12.10#E43
a0 = 1.0
a1 = 0.10416666666666666667
a2 = 0.08355034722222222222
a3 = 0.12822657455632716049
a4 = 0.29184902646414046425
a5 = 0.88162726744375765242
b0 = 1.0
b1 = -0.14583333333333333333
b2 = -0.09874131944444444444
b3 = -0.14331205391589506173
b4 = -0.31722720267841354810
b5 = -0.94242914795712024914
# Polynomials
# https://dlmf.nist.gov/12.10#E9
# https://dlmf.nist.gov/12.10#E10
ctp = ct ** arange(16).reshape((-1,1))
u0 = 1.0
u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0
u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0
u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0
u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0
u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:]
- 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0
v0 = 1.0
v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0
v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0
v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0
v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0
v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:]
+ 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0
# Airy Evaluation (Bi and Bip unused)
Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta)
# Prefactor for U
P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi
# Terms for U
# https://dlmf.nist.gov/12.10#E42
phip = phi ** arange(6, 31, 6).reshape((-1,1))
A0 = b0*u0
A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3
A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6
B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2
B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5
B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8
# U
# https://dlmf.nist.gov/12.10#E35
U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) +
Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0))
# Prefactor for derivative of U
Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi
# Terms for derivative of U
# https://dlmf.nist.gov/12.10#E46
C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta
C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4
C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7
D0 = a0*v0
D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3
D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6
# Derivative of U
# https://dlmf.nist.gov/12.10#E36
Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) +
Aip * (D0 + D1/mu**2.0 + D2/mu**4.0))
return U, Ud
def _newton(n, x_initial, maxit=5):
"""Newton iteration for polishing the asymptotic approximation
to the zeros of the Hermite polynomials.
Parameters
----------
n : int
Quadrature order
x_initial : ndarray
Initial guesses for the roots
maxit : int
Maximal number of Newton iterations.
The default 5 is sufficient, usually
only one or two steps are needed.
Returns
-------
nodes : ndarray
Quadrature nodes
weights : ndarray
Quadrature weights
See Also
--------
roots_hermite_asy
"""
# Variable transformation
mu = sqrt(2.0*n + 1.0)
t = x_initial / mu
theta = arccos(t)
# Newton iteration
for i in range(maxit):
u, ud = _pbcf(n, theta)
dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)
theta = theta + dtheta
if max(abs(dtheta)) < 1e-14:
break
# Undo variable transformation
x = mu * cos(theta)
# Central node is always zero
if n % 2 == 1:
x[0] = 0.0
# Compute weights
w = exp(-x**2) / (2.0*ud**2)
return x, w
def _roots_hermite_asy(n):
r"""Gauss-Hermite (physicist's) quadrature for large n.
Computes the sample points and weights for Gauss-Hermite quadrature.
The sample points are the roots of the nth degree Hermite polynomial,
:math:`H_n(x)`. These sample points and weights correctly integrate
polynomials of degree :math:`2n - 1` or less over the interval
:math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`.
This method relies on asymptotic expansions which work best for n > 150.
The algorithm has linear runtime making computation for very large n
feasible.
Parameters
----------
n : int
quadrature order
Returns
-------
nodes : ndarray
Quadrature nodes
weights : ndarray
Quadrature weights
See Also
--------
roots_hermite
References
----------
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
"""
iv = _initial_nodes(n)
nodes, weights = _newton(n, iv)
# Combine with negative parts
if n % 2 == 0:
nodes = hstack([-nodes[::-1], nodes])
weights = hstack([weights[::-1], weights])
else:
nodes = hstack([-nodes[-1:0:-1], nodes])
weights = hstack([weights[-1:0:-1], weights])
# Scale weights
weights *= sqrt(pi) / sum(weights)
return nodes, weights
def hermite(n, monic=False):
r"""Physicist's Hermite polynomial.
Defined by
.. math::
H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
H : orthopoly1d
Hermite polynomial.
Notes
-----
The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
\infty)` with weight function :math:`e^{-x^2}`.
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> p_monic = special.hermite(3, monic=True)
>>> p_monic
poly1d([ 1. , 0. , -1.5, 0. ])
>>> p_monic(1)
-0.49999999999999983
>>> x = np.linspace(-3, 3, 400)
>>> y = p_monic(x)
>>> plt.plot(x, y)
>>> plt.title("Monic Hermite polynomial of degree 3")
>>> plt.xlabel("x")
>>> plt.ylabel("H_3(x)")
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_hermite(n1)
wfunc = lambda x: exp(-x * x)
if n == 0:
x, w = [], []
hn = 2**n * _gam(n + 1) * sqrt(pi)
kn = 2**n
p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic,
lambda x: _ufuncs.eval_hermite(n, x))
return p
# Hermite 2 He_n(x)
def roots_hermitenorm(n, mu=False):
r"""Gauss-Hermite (statistician's) quadrature.
Compute the sample points and weights for Gauss-Hermite
quadrature. The sample points are the roots of the nth degree
Hermite polynomial, :math:`He_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[-\infty, \infty]` with weight
function :math:`w(x) = e^{-x^2/2}`. See 22.2.15 in [AS]_ for more
details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
Notes
-----
For small n up to 150 a modified version of the Golub-Welsch
algorithm is used. Nodes are computed from the eigenvalue
problem and improved by one step of a Newton iteration.
The weights are computed from the well-known analytical formula.
For n larger than 150 an optimal asymptotic algorithm is used
which computes nodes and weights in a numerically stable manner.
The algorithm has linear runtime making computation for very
large n (several thousand or more) feasible.
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.hermite_e.hermegauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
mu0 = np.sqrt(2.0*np.pi)
if n <= 150:
an_func = lambda k: 0.0*k
bn_func = lambda k: np.sqrt(k)
f = _ufuncs.eval_hermitenorm
df = lambda n, x: n * _ufuncs.eval_hermitenorm(n-1, x)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
else:
nodes, weights = _roots_hermite_asy(m)
# Transform
nodes *= sqrt(2)
weights *= sqrt(2)
if mu:
return nodes, weights, mu0
else:
return nodes, weights
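# Illustrative check for the probabilist's weight exp(-x**2 / 2) documented
# above: with f(x) = x**2 the exact integral is sqrt(2*pi), i.e. the second
# moment of a standard normal times its normalization constant.
import numpy as np
_xhn_demo, _whn_demo = roots_hermitenorm(2)
assert np.allclose(np.sum(_whn_demo * _xhn_demo**2), np.sqrt(2 * np.pi))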
def hermitenorm(n, monic=False):
r"""Normalized (probabilist's) Hermite polynomial.
Defined by
.. math::
He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
He : orthopoly1d
Hermite polynomial.
Notes
-----
The polynomials :math:`He_n` are orthogonal over :math:`(-\infty,
\infty)` with weight function :math:`e^{-x^2/2}`.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_hermitenorm(n1)
wfunc = lambda x: exp(-x * x / 2.0)
if n == 0:
x, w = [], []
hn = sqrt(2 * pi) * _gam(n + 1)
kn = 1.0
p = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(-inf, inf), monic=monic,
eval_func=lambda x: _ufuncs.eval_hermitenorm(n, x))
return p
# The remainder of the polynomials can be derived from the ones above.
# Ultraspherical (Gegenbauer) C^(alpha)_n(x)
def roots_gegenbauer(n, alpha, mu=False):
r"""Gauss-Gegenbauer quadrature.
Compute the sample points and weights for Gauss-Gegenbauer
quadrature. The sample points are the roots of the nth degree
Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. These sample
points and weights correctly integrate polynomials of degree
:math:`2n - 1` or less over the interval :math:`[-1, 1]` with
weight function :math:`w(x) = (1 - x^2)^{\alpha - 1/2}`. See
22.2.3 in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
alpha : float
alpha must be > -0.5
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
if alpha < -0.5:
raise ValueError("alpha must be greater than -0.5.")
elif alpha == 0.0:
# C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x)
# strictly, we should just error out here, since the roots are not
# really defined, but we used to return something useful, so let's
# keep doing so.
return roots_chebyt(n, mu)
if alpha <= 170:
mu0 = (np.sqrt(np.pi) * _ufuncs.gamma(alpha + 0.5)) \
/ _ufuncs.gamma(alpha + 1)
else:
# For large alpha we use a Taylor series expansion around inf,
# expressed as a 6th order polynomial of a^-1 and using Horner's
# method to minimize computation and maximize precision
inv_alpha = 1. / alpha
coeffs = np.array([0.000207186, -0.00152206, -0.000640869,
0.00488281, 0.0078125, -0.125, 1.])
mu0 = coeffs[0]
for term in range(1, len(coeffs)):
mu0 = mu0 * inv_alpha + coeffs[term]
mu0 = mu0 * np.sqrt(np.pi / alpha)
an_func = lambda k: 0.0 * k
bn_func = lambda k: np.sqrt(k * (k + 2 * alpha - 1)
/ (4 * (k + alpha) * (k + alpha - 1)))
f = lambda n, x: _ufuncs.eval_gegenbauer(n, alpha, x)
df = lambda n, x: ((-n*x*_ufuncs.eval_gegenbauer(n, alpha, x)
+ ((n + 2*alpha - 1)
* _ufuncs.eval_gegenbauer(n - 1, alpha, x)))
/ (1 - x**2))
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
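# Illustrative check: the weights sum to the integral of the weight function
# (1 - x**2)**(alpha - 1/2) over [-1, 1], which is
# sqrt(pi) * Gamma(alpha + 1/2) / Gamma(alpha + 1), i.e. the mu0 used above
# (here evaluated with the module-level gamma alias _gam).
import numpy as np
_xg_demo, _wg_demo, _mug_demo = roots_gegenbauer(5, 1.5, mu=True)
assert np.allclose(np.sum(_wg_demo), np.sqrt(np.pi) * _gam(2.0) / _gam(2.5))
assert np.allclose(_mug_demo, np.sum(_wg_demo))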
def gegenbauer(n, alpha, monic=False):
r"""Gegenbauer (ultraspherical) polynomial.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)}
- (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)}
+ n(n + 2\alpha)C_n^{(\alpha)} = 0
for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial
of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
alpha : float
Parameter, must be greater than -0.5.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
C : orthopoly1d
Gegenbauer polynomial.
Notes
-----
The polynomials :math:`C_n^{(\alpha)}` are orthogonal over
:math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha -
1/2)}`.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
We can initialize a variable ``p`` as a Gegenbauer polynomial using the
`gegenbauer` function and evaluate at a point ``x = 1``.
>>> p = special.gegenbauer(3, 0.5, monic=False)
>>> p
poly1d([ 2.5, 0. , -1.5, 0. ])
>>> p(1)
1.0
To evaluate ``p`` at various points ``x`` in the interval ``(-3, 3)``,
simply pass an array ``x`` to ``p`` as follows:
>>> x = np.linspace(-3, 3, 400)
>>> y = p(x)
We can then visualize ``x, y`` using `matplotlib.pyplot`.
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y)
>>> ax.set_title("Gegenbauer (ultraspherical) polynomial of degree 3")
>>> ax.set_xlabel("x")
>>> ax.set_ylabel("G_3(x)")
>>> plt.show()
"""
base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)
if monic:
return base
# Abramowitz and Stegun 22.5.20
factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) /
_gam(2*alpha) / _gam(alpha + 0.5 + n))
base._scale(factor)
base.__dict__['_eval_func'] = lambda x: _ufuncs.eval_gegenbauer(float(n),
alpha, x)
return base
# Chebyshev of the first kind: T_n(x) =
# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)
# Computed anew.
def roots_chebyt(n, mu=False):
r"""Gauss-Chebyshev (first kind) quadrature.
Computes the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the first kind, :math:`T_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-1, 1]`
with weight function :math:`w(x) = 1/\sqrt{1 - x^2}`. See 22.2.4
in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.chebyshev.chebgauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError('n must be a positive integer.')
x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m))
w = np.full_like(x, pi/m)
if mu:
return x, w, pi
else:
return x, w
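# Illustrative check of the closed-form rule above: already for n = 2 the nodes
# and weights integrate f(x) = x**2 exactly against 1/sqrt(1 - x**2), and
# int_{-1}^{1} x**2 / sqrt(1 - x**2) dx = pi / 2.
import numpy as np
_xt_demo, _wt_demo = roots_chebyt(2)
assert np.allclose(np.sum(_wt_demo * _xt_demo**2), np.pi / 2)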
def chebyt(n, monic=False):
r"""Chebyshev polynomial of the first kind.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0;
:math:`T_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
T : orthopoly1d
Chebyshev polynomial of the first kind.
Notes
-----
The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]`
with weight function :math:`(1 - x^2)^{-1/2}`.
See Also
--------
chebyu : Chebyshev polynomial of the second kind.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
Chebyshev polynomials of the first kind of order :math:`n` can
be obtained as the determinant of specific :math:`n \times n`
matrices. As an example we can check how the points obtained from
the determinant of the following :math:`3 \times 3` matrix
lie exactly on :math:`T_3`:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.linalg import det
>>> from scipy.special import chebyt
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-2.0, 2.0)
>>> ax.set_title(r'Chebyshev polynomial $T_3$')
>>> ax.plot(x, chebyt(3)(x), label=rf'$T_3$')
>>> for p in np.arange(-1.0, 1.0, 0.1):
... ax.plot(p,
... det(np.array([[p, 1, 0], [1, 2*p, 1], [0, 1, 2*p]])),
... 'rx')
>>> plt.legend(loc='best')
>>> plt.show()
They are also related to the Jacobi Polynomials
:math:`P_n^{(-0.5, -0.5)}` through the relation:
.. math::
P_n^{(-0.5, -0.5)}(x) = \frac{1}{4^n} \binom{2n}{n} T_n(x)
Let's verify it for :math:`n = 3`:
>>> from scipy.special import binom
>>> from scipy.special import jacobi
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> np.allclose(jacobi(3, -0.5, -0.5)(x),
... 1/64 * binom(6, 3) * chebyt(3)(x))
True
We can plot the Chebyshev polynomials :math:`T_n` for some values
of :math:`n`:
>>> x = np.arange(-1.5, 1.5, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-4.0, 4.0)
>>> ax.set_title(r'Chebyshev polynomials $T_n$')
>>> for n in np.arange(2,5):
... ax.plot(x, chebyt(n)(x), label=rf'$T_n={n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: 1.0 / sqrt(1 - x * x)
if n == 0:
return orthopoly1d([], [], pi, 1.0, wfunc, (-1, 1), monic,
lambda x: _ufuncs.eval_chebyt(n, x))
n1 = n
x, w, mu = roots_chebyt(n1, mu=True)
hn = pi / 2
kn = 2**(n - 1)
p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
lambda x: _ufuncs.eval_chebyt(n, x))
return p
# Chebyshev of the second kind
# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)
def roots_chebyu(n, mu=False):
r"""Gauss-Chebyshev (second kind) quadrature.
Computes the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the second kind, :math:`U_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-1, 1]`
with weight function :math:`w(x) = \sqrt{1 - x^2}`. See 22.2.5 in
[AS]_ for details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
m = int(n)
if n < 1 or n != m:
raise ValueError('n must be a positive integer.')
t = np.arange(m, 0, -1) * pi / (m + 1)
x = np.cos(t)
w = pi * np.sin(t)**2 / (m + 1)
if mu:
return x, w, pi / 2
else:
return x, w
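# Illustrative check for the second-kind weight sqrt(1 - x**2) used above: the
# 2-point rule reproduces int_{-1}^{1} x**2 * sqrt(1 - x**2) dx = pi / 8 exactly.
import numpy as np
_xu_demo, _wu_demo = roots_chebyu(2)
assert np.allclose(np.sum(_wu_demo * _xu_demo**2), np.pi / 8)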
def chebyu(n, monic=False):
r"""Chebyshev polynomial of the second kind.
Defined to be the solution of
.. math::
(1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n
+ n(n + 2)U_n = 0;
:math:`U_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
U : orthopoly1d
Chebyshev polynomial of the second kind.
Notes
-----
The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]`
with weight function :math:`(1 - x^2)^{1/2}`.
See Also
--------
chebyt : Chebyshev polynomial of the first kind.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
Chebyshev polynomials of the second kind of order :math:`n` can
be obtained as the determinant of specific :math:`n \times n`
matrices. As an example we can check how the points obtained from
the determinant of the following :math:`3 \times 3` matrix
lie exactly on :math:`U_3`:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.linalg import det
>>> from scipy.special import chebyu
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-2.0, 2.0)
>>> ax.set_title(r'Chebyshev polynomial $U_3$')
>>> ax.plot(x, chebyu(3)(x), label=rf'$U_3$')
>>> for p in np.arange(-1.0, 1.0, 0.1):
... ax.plot(p,
... det(np.array([[2*p, 1, 0], [1, 2*p, 1], [0, 1, 2*p]])),
... 'rx')
>>> plt.legend(loc='best')
>>> plt.show()
They satisfy the recurrence relation:
.. math::
U_{2n-1}(x) = 2 T_n(x)U_{n-1}(x)
where the :math:`T_n` are the Chebyshev polynomials of the first kind.
Let's verify it for :math:`n = 2`:
>>> from scipy.special import chebyt
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> np.allclose(chebyu(3)(x), 2 * chebyt(2)(x) * chebyu(1)(x))
True
We can plot the Chebyshev polynomials :math:`U_n` for some values
of :math:`n`:
>>> x = np.arange(-1.0, 1.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-1.5, 1.5)
>>> ax.set_title(r'Chebyshev polynomials $U_n$')
>>> for n in np.arange(1,5):
... ax.plot(x, chebyu(n)(x), label=rf'$U_n={n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
base = jacobi(n, 0.5, 0.5, monic=monic)
if monic:
return base
factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5)
base._scale(factor)
return base
# Chebyshev of the first kind C_n(x)
def roots_chebyc(n, mu=False):
r"""Gauss-Chebyshev (first kind) quadrature.
Compute the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the first kind, :math:`C_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-2, 2]`
with weight function :math:`w(x) = 1 / \sqrt{1 - (x/2)^2}`. See
22.2.6 in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w, m = roots_chebyt(n, True)
x *= 2
w *= 2
m *= 2
if mu:
return x, w, m
else:
return x, w
def chebyc(n, monic=False):
r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the
nth Chebyshev polynomial of the first kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
C : orthopoly1d
Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
Notes
-----
The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]`
with weight function :math:`1/\sqrt{1 - (x/2)^2}`.
See Also
--------
chebyt : Chebyshev polynomial of the first kind.
References
----------
.. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
Section 22. National Bureau of Standards, 1972.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_chebyc(n1)
if n == 0:
x, w = [], []
hn = 4 * pi * ((n == 0) + 1)
kn = 1.0
p = orthopoly1d(x, w, hn, kn,
wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),
limits=(-2, 2), monic=monic)
if not monic:
p._scale(2.0 / p(2))
p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebyc(n, x)
return p
# Chebyshev of the second kind S_n(x)
def roots_chebys(n, mu=False):
r"""Gauss-Chebyshev (second kind) quadrature.
Compute the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
Chebyshev polynomial of the second kind, :math:`S_n(x)`. These
sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[-2, 2]`
with weight function :math:`w(x) = \sqrt{1 - (x/2)^2}`. See 22.2.7
in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w, m = roots_chebyu(n, True)
x *= 2
w *= 2
m *= 2
if mu:
return x, w, m
else:
return x, w
def chebys(n, monic=False):
r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the
nth Chebyshev polynomial of the second kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
S : orthopoly1d
Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
Notes
-----
The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]`
with weight function :math:`\sqrt{1 - (x/2)^2}`.
See Also
--------
chebyu : Chebyshev polynomial of the second kind
References
----------
.. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
Section 22. National Bureau of Standards, 1972.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_chebys(n1)
if n == 0:
x, w = [], []
hn = pi
kn = 1.0
p = orthopoly1d(x, w, hn, kn,
wfunc=lambda x: sqrt(1 - x * x / 4.0),
limits=(-2, 2), monic=monic)
if not monic:
factor = (n + 1.0) / p(2)
p._scale(factor)
p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebys(n, x)
return p
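# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# chebys should satisfy the defining identity S_n(x) = U_n(x/2); this checks
# it against chebyu (defined earlier in this module) at a few points.
def _chebys_identity_example():
    xs = np.linspace(-2, 2, 9)
    assert np.allclose(chebys(3)(xs), chebyu(3)(xs / 2))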
# Shifted Chebyshev of the first kind T^*_n(x)
def roots_sh_chebyt(n, mu=False):
r"""Gauss-Chebyshev (first kind, shifted) quadrature.
Compute the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
shifted Chebyshev polynomial of the first kind, :math:`T^*_n(x)`.
These sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[0, 1]`
with weight function :math:`w(x) = 1/\sqrt{x - x^2}`. See 22.2.8
in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
xw = roots_chebyt(n, mu)
return ((xw[0] + 1) / 2,) + xw[1:]
def sh_chebyt(n, monic=False):
r"""Shifted Chebyshev polynomial of the first kind.
Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth
Chebyshev polynomial of the first kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
T : orthopoly1d
Shifted Chebyshev polynomial of the first kind.
Notes
-----
The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]`
with weight function :math:`(x - x^2)^{-1/2}`.
"""
base = sh_jacobi(n, 0.0, 0.5, monic=monic)
if monic:
return base
if n > 0:
factor = 4**n / 2.0
else:
factor = 1.0
base._scale(factor)
return base
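# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# sh_chebyt should satisfy the defining identity T*_n(x) = T_n(2*x - 1); this
# checks it against chebyt on the shifted interval [0, 1].
def _sh_chebyt_identity_example():
    xs = np.linspace(0, 1, 9)
    assert np.allclose(sh_chebyt(3)(xs), chebyt(3)(2 * xs - 1))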
# Shifted Chebyshev of the second kind U^*_n(x)
def roots_sh_chebyu(n, mu=False):
r"""Gauss-Chebyshev (second kind, shifted) quadrature.
Computes the sample points and weights for Gauss-Chebyshev
quadrature. The sample points are the roots of the nth degree
shifted Chebyshev polynomial of the second kind, :math:`U^*_n(x)`.
These sample points and weights correctly integrate polynomials of
degree :math:`2n - 1` or less over the interval :math:`[0, 1]`
with weight function :math:`w(x) = \sqrt{x - x^2}`. See 22.2.9 in
[AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w, m = roots_chebyu(n, True)
x = (x + 1) / 2
m_us = _ufuncs.beta(1.5, 1.5)
w *= m_us / m
if mu:
return x, w, m_us
else:
return x, w
def sh_chebyu(n, monic=False):
r"""Shifted Chebyshev polynomial of the second kind.
Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth
Chebyshev polynomial of the second kind.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
U : orthopoly1d
Shifted Chebyshev polynomial of the second kind.
Notes
-----
The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]`
with weight function :math:`(x - x^2)^{1/2}`.
"""
base = sh_jacobi(n, 2.0, 1.5, monic=monic)
if monic:
return base
factor = 4**n
base._scale(factor)
return base
# Legendre
def roots_legendre(n, mu=False):
r"""Gauss-Legendre quadrature.
Compute the sample points and weights for Gauss-Legendre
quadrature [GL]_. The sample points are the roots of the nth degree
Legendre polynomial :math:`P_n(x)`. These sample points and
weights correctly integrate polynomials of degree :math:`2n - 1`
or less over the interval :math:`[-1, 1]` with weight function
:math:`w(x) = 1`. See 22.2.10 in [AS]_ for more details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
numpy.polynomial.legendre.leggauss
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [GL] Gauss-Legendre quadrature, Wikipedia,
https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature
Examples
--------
>>> import numpy as np
>>> from scipy.special import roots_legendre, eval_legendre
>>> roots, weights = roots_legendre(9)
``roots`` holds the roots, and ``weights`` holds the weights for
Gauss-Legendre quadrature.
>>> roots
array([-0.96816024, -0.83603111, -0.61337143, -0.32425342, 0. ,
0.32425342, 0.61337143, 0.83603111, 0.96816024])
>>> weights
array([0.08127439, 0.18064816, 0.2606107 , 0.31234708, 0.33023936,
0.31234708, 0.2606107 , 0.18064816, 0.08127439])
Verify that we have the roots by evaluating the degree 9 Legendre
polynomial at ``roots``. All the values are approximately zero:
>>> eval_legendre(9, roots)
array([-8.88178420e-16, -2.22044605e-16, 1.11022302e-16, 1.11022302e-16,
0.00000000e+00, -5.55111512e-17, -1.94289029e-16, 1.38777878e-16,
-8.32667268e-17])
Here we'll show how the above values can be used to estimate the
integral from 1 to 2 of f(t) = t + 1/t with Gauss-Legendre
quadrature [GL]_. First define the function and the integration
limits.
>>> def f(t):
... return t + 1/t
...
>>> a = 1
>>> b = 2
We'll use ``integral(f(t), t=a, t=b)`` to denote the definite integral
of f from t=a to t=b. The sample points in ``roots`` are from the
interval [-1, 1], so we'll rewrite the integral with the simple change
of variable::
x = 2/(b - a) * t - (a + b)/(b - a)
with inverse::
t = (b - a)/2 * x + (a + b)/2
Then::
integral(f(t), a, b) =
(b - a)/2 * integral(f((b-a)/2*x + (a+b)/2), x=-1, x=1)
We can approximate the latter integral with the values returned
by `roots_legendre`.
Map the roots computed above from [-1, 1] to [a, b].
>>> t = (b - a)/2 * roots + (a + b)/2
Approximate the integral as the weighted sum of the function values.
>>> (b - a)/2 * f(t).dot(weights)
2.1931471805599276
Compare that to the exact result, which is 3/2 + log(2):
>>> 1.5 + np.log(2)
2.1931471805599454
"""
m = int(n)
if n < 1 or n != m:
raise ValueError("n must be a positive integer.")
mu0 = 2.0
an_func = lambda k: 0.0 * k
bn_func = lambda k: k * np.sqrt(1.0 / (4 * k * k - 1))
f = _ufuncs.eval_legendre
df = lambda n, x: (-n*x*_ufuncs.eval_legendre(n, x)
+ n*_ufuncs.eval_legendre(n-1, x))/(1-x**2)
return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def legendre(n, monic=False):
r"""Legendre polynomial.
Defined to be the solution of
.. math::
\frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right]
+ n(n + 1)P_n(x) = 0;
:math:`P_n(x)` is a polynomial of degree :math:`n`.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
P : orthopoly1d
Legendre polynomial.
Notes
-----
The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]`
with weight function 1.
Examples
--------
Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):
>>> from scipy.special import legendre
>>> legendre(3)
poly1d([ 2.5, 0. , -1.5, 0. ])
"""
if n < 0:
raise ValueError("n must be nonnegative.")
if n == 0:
n1 = n + 1
else:
n1 = n
x, w = roots_legendre(n1)
if n == 0:
x, w = [], []
hn = 2.0 / (2 * n + 1)
kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n
p = orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),
monic=monic,
eval_func=lambda x: _ufuncs.eval_legendre(n, x))
return p
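# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# legendre(3) should agree with the closed form P_3(x) = (5*x**3 - 3*x) / 2,
# matching the poly1d([2.5, 0., -1.5, 0.]) shown in the docstring above.
def _legendre_example():
    xs = np.linspace(-1, 1, 5)
    assert np.allclose(legendre(3)(xs), (5 * xs**3 - 3 * xs) / 2)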
# Shifted Legendre P^*_n(x)
def roots_sh_legendre(n, mu=False):
r"""Gauss-Legendre (shifted) quadrature.
Compute the sample points and weights for Gauss-Legendre
quadrature. The sample points are the roots of the nth degree
shifted Legendre polynomial :math:`P^*_n(x)`. These sample points
and weights correctly integrate polynomials of degree :math:`2n -
1` or less over the interval :math:`[0, 1]` with weight function
:math:`w(x) = 1.0`. See 22.2.11 in [AS]_ for details.
Parameters
----------
n : int
quadrature order
mu : bool, optional
If True, return the sum of the weights, optional.
Returns
-------
x : ndarray
Sample points
w : ndarray
Weights
mu : float
Sum of the weights
See Also
--------
scipy.integrate.quadrature
scipy.integrate.fixed_quad
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
x, w = roots_legendre(n)
x = (x + 1) / 2
w /= 2
if mu:
return x, w, 1.0
else:
return x, w
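# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# roots_sh_legendre integrates with unit weight over [0, 1], so the weighted
# sum of x**2 at the nodes should equal the exact integral 1/3.
def _roots_sh_legendre_example():
    x, w = roots_sh_legendre(4)
    assert np.allclose(np.sum(w * x**2), 1 / 3)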
def sh_legendre(n, monic=False):
r"""Shifted Legendre polynomial.
Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth
Legendre polynomial.
Parameters
----------
n : int
Degree of the polynomial.
monic : bool, optional
If `True`, scale the leading coefficient to be 1. Default is
`False`.
Returns
-------
P : orthopoly1d
Shifted Legendre polynomial.
Notes
-----
The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]`
with weight function 1.
"""
if n < 0:
raise ValueError("n must be nonnegative.")
wfunc = lambda x: 0.0 * x + 1.0
if n == 0:
return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic,
lambda x: _ufuncs.eval_sh_legendre(n, x))
x, w = roots_sh_legendre(n)
hn = 1.0 / (2 * n + 1.0)
kn = _gam(2 * n + 1) / _gam(n + 1)**2
p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic,
eval_func=lambda x: _ufuncs.eval_sh_legendre(n, x))
return p
# Make the old root function names an alias for the new ones
_modattrs = globals()
for newfun, oldfun in _rootfuns_map.items():
_modattrs[oldfun] = _modattrs[newfun]
__all__.append(oldfun)
| bsd-3-clause | 5deb35f597b2aba073e4ed9233968b86 | 27.909269 | 139 | 0.558935 | 3.09086 | false | false | false | false |
scipy/scipy | scipy/signal/_bsplines.py | 8 | 19753 | from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from ._spline import cspline2d, sepfir2d
from scipy.special import comb
from scipy._lib._util import float_factorial
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
Parameters
----------
Iin : array_like
input data set
lmbda : float, optional
spline smoothing fall-off value, default is `5.0`.
Returns
-------
res : ndarray
filtered input data
Examples
--------
We can filter a multi-dimensional signal (e.g., a 2D image) using a cubic
B-spline filter:
>>> import numpy as np
>>> from scipy.signal import spline_filter
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, lmbda=0.1)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
intype = Iin.dtype.char
hcol = array([1.0, 4.0, 1.0], 'f') / 6.0
if intype in ['F', 'D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real, lmbda)
cki = cspline2d(Iin.imag, lmbda)
outr = sepfir2d(ckr, hcol, hcol)
outi = sepfir2d(cki, hcol, hcol)
out = (outr + 1j * outi).astype(intype)
elif intype in ['f', 'd']:
ckr = cspline2d(Iin, lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
_splinefunc_cache = {}
def _bspline_piecefunctions(order):
"""Returns the function defined over the left-side pieces for a bspline of
a given order.
The 0th piece is the first one less than 0. The last piece is a function
identical to 0 (returned as the constant 0). (There are order//2 + 2 total
pieces).
Also returns the condition functions that when evaluated return boolean
arrays for use with `numpy.piecewise`.
"""
try:
return _splinefunc_cache[order]
except KeyError:
pass
def condfuncgen(num, val1, val2):
if num == 0:
return lambda x: logical_and(less_equal(x, val1),
greater_equal(x, val2))
elif num == 2:
return lambda x: less_equal(x, val2)
else:
return lambda x: logical_and(less(x, val1),
greater_equal(x, val2))
last = order // 2 + 2
if order % 2:
startbound = -1.0
else:
startbound = -0.5
condfuncs = [condfuncgen(0, 0, startbound)]
bound = startbound
for num in range(1, last - 1):
condfuncs.append(condfuncgen(1, bound, bound - 1))
bound = bound - 1
condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
# final value of bound is used in piecefuncgen below
# the functions to evaluate are taken from the left-hand side
# in the general expression derived from the central difference
# operator (because they involve fewer terms).
fval = float_factorial(order)
def piecefuncgen(num):
Mk = order // 2 - num
if (Mk < 0):
return 0 # final function is 0
coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
for k in range(Mk + 1)]
shifts = [-bound - k for k in range(Mk + 1)]
def thefunc(x):
res = 0.0
for k in range(Mk + 1):
res += coeffs[k] * (x + shifts[k]) ** order
return res
return thefunc
funclist = [piecefuncgen(k) for k in range(last)]
_splinefunc_cache[order] = (funclist, condfuncs)
return funclist, condfuncs
def bspline(x, n):
"""B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values
See Also
--------
cubic : A cubic B-spline.
quadratic : A quadratic B-spline.
Notes
-----
Uses numpy.piecewise and automatic function-generator.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> import numpy as np
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = -abs(asarray(x))
# number of pieces on the left-side is (n+1)/2
funclist, condfuncs = _bspline_piecefunctions(n)
condlist = [func(ax) for func in condfuncs]
return piecewise(ax, condlist, funclist)
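# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# Integer translates of a B-spline form a partition of unity; sampling the
# cubic basis at the offsets -2..2 gives 0, 1/6, 2/3, 1/6, 0, which sum to 1.
def _bspline_partition_of_unity_example():
    import numpy as np
    vals = bspline(np.arange(-2.0, 3.0), 3)
    assert np.allclose(vals.sum(), 1.0)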
def gauss_spline(x, n):
r"""Gaussian approximation to B-spline basis function of order n.
Parameters
----------
x : array_like
a knot vector
n : int
The order of the spline. Must be non-negative, i.e., n >= 0
Returns
-------
res : ndarray
B-spline basis function values approximated by a zero-mean Gaussian
function.
Notes
-----
The B-spline basis function can be approximated well by a zero-mean
Gaussian function with variance :math:`\sigma^2 = (n+1)/12` for large `n`:
.. math::  \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{x^2}{2\sigma^2}\right)
References
----------
.. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
Science, vol 4485. Springer, Berlin, Heidelberg
.. [2] http://folk.uio.no/inf3330/scripting/doc/python/SciPy/tutorial/old/node24.html
Examples
--------
We can calculate B-Spline basis functions approximated by a gaussian
distribution:
>>> import numpy as np
>>> from scipy.signal import gauss_spline, bspline
>>> knots = np.array([-1.0, 0.0, -1.0])
>>> gauss_spline(knots, 3)
array([0.15418033, 0.6909883, 0.15418033]) # may vary
>>> bspline(knots, 3)
array([0.16666667, 0.66666667, 0.16666667]) # may vary
"""
x = asarray(x)
signsq = (n + 1) / 12.0
return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
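# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# Even at a moderate order the Gaussian approximation is close to the exact
# basis function; at x = 0 and n = 3 the two values differ by roughly 0.02.
def _gauss_spline_accuracy_example():
    import numpy as np
    assert np.allclose(gauss_spline(0.0, 3), bspline(0.0, 3), atol=0.05)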
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Cubic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
quadratic : A quadratic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> import numpy as np
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def quadratic(x):
"""A quadratic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
Parameters
----------
x : array_like
a knot vector
Returns
-------
res : ndarray
Quadratic B-spline basis function values
See Also
--------
bspline : B-spline basis function of order n
cubic : A cubic B-spline.
Examples
--------
We can calculate B-Spline basis function of several orders:
>>> import numpy as np
>>> from scipy.signal import bspline, cubic, quadratic
>>> bspline(0.0, 1)
1
>>> knots = [-1.0, 0.0, -1.0]
>>> bspline(knots, 2)
array([0.125, 0.75, 0.125])
>>> np.array_equal(bspline(knots, 2), quadratic(knots))
True
>>> np.array_equal(bspline(knots, 3), cubic(knots))
True
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 0.5)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 0.75 - ax1 ** 2
cond2 = ~cond1 & less(ax, 1.5)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = (ax2 - 1.5) ** 2 / 2.0
return res
def _coeff_smooth(lam):
xi = 1 - 96 * lam + 24 * lam * sqrt(3 + 144 * lam)
omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
rho = rho * sqrt((48 * lam + 24 * lam * sqrt(3 + 144 * lam)) / xi)
return rho, omeg
def _hc(k, cs, rho, omega):
return (cs / sin(omega) * (rho ** k) * sin(omega * (k + 1)) *
greater(k, -1))
def _hs(k, cs, rho, omega):
c0 = (cs * cs * (1 + rho * rho) / (1 - rho * rho) /
(1 - 2 * rho * rho * cos(2 * omega) + rho ** 4))
gamma = (1 - rho * rho) / (1 + rho * rho) / tan(omega)
ak = abs(k)
return c0 * rho ** ak * (cos(omega * ak) + gamma * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
rho, omega = _coeff_smooth(lamb)
cs = 1 - 2 * rho * cos(omega) + rho * rho
K = len(signal)
yp = zeros((K,), signal.dtype.char)
k = arange(K)
yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
add.reduce(_hc(k + 1, cs, rho, omega) * signal))
yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
_hc(1, cs, rho, omega) * signal[1] +
add.reduce(_hc(k + 2, cs, rho, omega) * signal))
for n in range(2, K):
yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
rho * rho * yp[n - 2])
y = zeros((K,), signal.dtype.char)
y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
_hs(k + 1, cs, rho, omega)) * signal[::-1])
y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
_hs(k + 2, cs, rho, omega)) * signal[::-1])
for n in range(K - 3, -1, -1):
y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
rho * rho * y[n + 2])
return y
def _cubic_coeff(signal):
zi = -2 + sqrt(3)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 6.0
def _quadratic_coeff(signal):
zi = -3 + 2 * sqrt(2.0)
K = len(signal)
yplus = zeros((K,), signal.dtype.char)
powers = zi ** arange(K)
yplus[0] = signal[0] + zi * add.reduce(powers * signal)
for k in range(1, K):
yplus[k] = signal[k] + zi * yplus[k - 1]
output = zeros((K,), signal.dtype.char)
output[K - 1] = zi / (zi - 1) * yplus[K - 1]
for k in range(K - 2, -1, -1):
output[k] = zi * (output[k + 1] - yplus[k])
return output * 8.0
def cspline1d(signal, lamb=0.0):
"""
Compute cubic spline coefficients for rank-1 array.
Find the cubic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient, default is 0.0.
Returns
-------
c : ndarray
Cubic spline coefficients.
See Also
--------
cspline1d_eval : Evaluate a cubic spline at the new set of points.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
return _cubic_smooth_coeff(signal, lamb)
else:
return _cubic_coeff(signal)
def qspline1d(signal, lamb=0.0):
"""Compute quadratic spline coefficients for rank-1 array.
Parameters
----------
signal : ndarray
A rank-1 array representing samples of a signal.
lamb : float, optional
Smoothing coefficient (must be zero for now).
Returns
-------
c : ndarray
Quadratic spline coefficients.
See Also
--------
qspline1d_eval : Evaluate a quadratic spline at the new set of points.
Notes
-----
Find the quadratic spline coefficients for a 1-D signal assuming
mirror-symmetric boundary conditions. To obtain the signal back from the
spline representation mirror-symmetric-convolve these coefficients with a
length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
if lamb != 0.0:
raise ValueError("Smoothing quadratic splines not supported yet.")
else:
return _quadratic_coeff(signal)
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a cubic spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other words, the old sample points (knot points) for which the `cj`
represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Parameters
----------
cj : ndarray
cubic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
The cubic spline evaluated at `newx`.
See Also
--------
cspline1d : Compute cubic spline coefficients for rank-1 array.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a cubic spline:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import cspline1d, cspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = cspline1d_eval(cspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / float(dx)
res = zeros_like(newx, dtype=cj.dtype)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = cspline1d_eval(cj, -newx[cond1])
res[cond2] = cspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx, dtype=cj.dtype)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
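# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# With lamb=0 the coefficients from cspline1d define an interpolating spline,
# so evaluating them back at the interior knots reproduces the samples (the
# first and last knots involve the boundary handling and are skipped here).
def _cspline_interpolation_example():
    import numpy as np
    sig = np.sin(np.linspace(0, 2 * np.pi, 20))
    knots = np.arange(1, len(sig) - 1, dtype=float)
    assert np.allclose(cspline1d_eval(cspline1d(sig), knots), sig[1:-1])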
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a quadratic spline at the new set of points.
Parameters
----------
cj : ndarray
Quadratic spline coefficients
newx : ndarray
New set of points.
dx : float, optional
Old sample-spacing, the default value is 1.0.
x0 : int, optional
Old origin, the default value is 0.
Returns
-------
res : ndarray
The quadratic spline evaluated at `newx`.
See Also
--------
qspline1d : Compute quadratic spline coefficients for rank-1 array.
Notes
-----
`dx` is the old sample-spacing while `x0` was the old origin. In
other words, the old sample points (knot points) for which the `cj`
represent spline coefficients were at equally-spaced points of::
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
Examples
--------
We can filter a signal to reduce and smooth out high-frequency noise with
a quadratic spline:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import qspline1d, qspline1d_eval
>>> rng = np.random.default_rng()
>>> sig = np.repeat([0., 1., 0.], 100)
>>> sig += rng.standard_normal(len(sig))*0.05 # add noise
>>> time = np.linspace(0, len(sig))
>>> filtered = qspline1d_eval(qspline1d(sig), time)
>>> plt.plot(sig, label="signal")
>>> plt.plot(time, filtered, label="filtered")
>>> plt.legend()
>>> plt.show()
"""
newx = (asarray(newx) - x0) / dx
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = qspline1d_eval(cj, -newx[cond1])
res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 1.5).astype(int) + 1
for i in range(3):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * quadratic(newx - thisj)
res[cond3] = result
return res
| bsd-3-clause | 829591a7aae9de5285e8b77c90b954ac | 27.920937 | 89 | 0.568116 | 3.250987 | false | false | false | false |
scipy/scipy | scipy/sparse/tests/test_array_api.py | 10 | 7480 | import pytest
import numpy as np
import numpy.testing as npt
import scipy.sparse
import scipy.sparse.linalg as spla
sparray_types = ('bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil')
sparray_classes = [
getattr(scipy.sparse, f'{T}_array') for T in sparray_types
]
A = np.array([
[0, 1, 2, 0],
[2, 0, 0, 3],
[1, 4, 0, 0]
])
B = np.array([
[0, 1],
[2, 0]
])
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, 0],
[0, 2, 1, 0],
[0, 0, 1, 2]
], dtype=float)
sparrays = [sparray(A) for sparray in sparray_classes]
square_sparrays = [sparray(B) for sparray in sparray_classes]
eig_sparrays = [sparray(X) for sparray in sparray_classes]
parametrize_sparrays = pytest.mark.parametrize(
"A", sparrays, ids=sparray_types
)
parametrize_square_sparrays = pytest.mark.parametrize(
"B", square_sparrays, ids=sparray_types
)
parametrize_eig_sparrays = pytest.mark.parametrize(
"X", eig_sparrays, ids=sparray_types
)
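# Editor's note (illustrative, not part of the original test module): the
# ``ids=sparray_types`` arguments give test ids such as ``test_sum[csr]``,
# so a single sparse format can be exercised in isolation with, for example:
#     pytest scipy/sparse/tests/test_array_api.py -k csr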
@parametrize_sparrays
def test_sum(A):
assert not isinstance(A.sum(axis=0), np.matrix), \
"Expected array, got matrix"
assert A.sum(axis=0).shape == (4,)
assert A.sum(axis=1).shape == (3,)
@parametrize_sparrays
def test_mean(A):
assert not isinstance(A.mean(axis=1), np.matrix), \
"Expected array, got matrix"
@parametrize_sparrays
def test_todense(A):
assert not isinstance(A.todense(), np.matrix), \
"Expected array, got matrix"
@parametrize_sparrays
def test_indexing(A):
if A.__class__.__name__[:3] in ('dia', 'coo', 'bsr'):
return
with pytest.raises(NotImplementedError):
A[1, :]
with pytest.raises(NotImplementedError):
A[:, 1]
with pytest.raises(NotImplementedError):
A[1, [1, 2]]
with pytest.raises(NotImplementedError):
A[[1, 2], 1]
assert A[[0]]._is_array, "Expected sparse array, got sparse matrix"
assert A[1, [[1, 2]]]._is_array, "Expected sparse array, got something else"
assert A[[[1, 2]], 1]._is_array, "Expected sparse array, got something else"
assert A[:, [1, 2]]._is_array, "Expected sparse array, got something else"
@parametrize_sparrays
def test_dense_addition(A):
X = np.random.random(A.shape)
assert not isinstance(A + X, np.matrix), "Expected array, got matrix"
@parametrize_sparrays
def test_sparse_addition(A):
assert (A + A)._is_array, "Expected array, got matrix"
@parametrize_sparrays
def test_elementwise_mul(A):
assert np.all((A * A).todense() == A.power(2).todense())
@parametrize_sparrays
def test_elementwise_rmul(A):
with pytest.raises(TypeError):
None * A
with pytest.raises(ValueError):
np.eye(3) * scipy.sparse.csr_array(np.arange(6).reshape(2, 3))
assert np.all((2 * A) == (A.todense() * 2))
assert np.all((A.todense() * A) == (A.todense() ** 2))
@parametrize_sparrays
def test_matmul(A):
assert np.all((A @ A.T).todense() == A.dot(A.T).todense())
@parametrize_square_sparrays
def test_pow(B):
assert (B**0)._is_array, "Expected array, got matrix"
assert (B**2)._is_array, "Expected array, got matrix"
@parametrize_sparrays
def test_sparse_divide(A):
assert isinstance(A / A, np.ndarray)
@parametrize_sparrays
def test_dense_divide(A):
assert (A / 2)._is_array, "Expected array, got matrix"
@parametrize_sparrays
def test_no_A_attr(A):
with pytest.warns(np.VisibleDeprecationWarning):
A.A
@parametrize_sparrays
def test_no_H_attr(A):
with pytest.warns(np.VisibleDeprecationWarning):
A.H
@parametrize_sparrays
def test_getrow_getcol(A):
assert A.getcol(0)._is_array
assert A.getrow(0)._is_array
@parametrize_sparrays
def test_docstr(A):
if A.__doc__ is None:
return
docstr = A.__doc__.lower()
for phrase in ('matrix', 'matrices'):
assert phrase not in docstr
# -- linalg --
@parametrize_sparrays
def test_as_linearoperator(A):
L = spla.aslinearoperator(A)
npt.assert_allclose(L * [1, 2, 3, 4], A @ [1, 2, 3, 4])
@parametrize_square_sparrays
def test_inv(B):
if B.__class__.__name__[:3] != 'csc':
return
C = spla.inv(B)
assert C._is_array
npt.assert_allclose(C.todense(), np.linalg.inv(B.todense()))
@parametrize_square_sparrays
def test_expm(B):
if B.__class__.__name__[:3] != 'csc':
return
Bmat = scipy.sparse.csc_matrix(B)
C = spla.expm(B)
assert C._is_array
npt.assert_allclose(
C.todense(),
spla.expm(Bmat).todense()
)
@parametrize_square_sparrays
def test_expm_multiply(B):
if B.__class__.__name__[:3] != 'csc':
return
npt.assert_allclose(
spla.expm_multiply(B, np.array([1, 2])),
spla.expm(B) @ [1, 2]
)
@parametrize_sparrays
def test_norm(A):
C = spla.norm(A)
npt.assert_allclose(C, np.linalg.norm(A.todense()))
@parametrize_square_sparrays
def test_onenormest(B):
C = spla.onenormest(B)
npt.assert_allclose(C, np.linalg.norm(B.todense(), 1))
@parametrize_square_sparrays
def test_spsolve(B):
if B.__class__.__name__[:3] not in ('csc', 'csr'):
return
npt.assert_allclose(
spla.spsolve(B, [1, 2]),
np.linalg.solve(B.todense(), [1, 2])
)
def test_spsolve_triangular():
X = scipy.sparse.csr_array([
[1, 0, 0, 0],
[2, 1, 0, 0],
[3, 2, 1, 0],
[4, 3, 2, 1],
])
spla.spsolve_triangular(X, [1, 2, 3, 4])
@parametrize_square_sparrays
def test_factorized(B):
if B.__class__.__name__[:3] != 'csc':
return
LU = spla.factorized(B)
npt.assert_allclose(
LU(np.array([1, 2])),
np.linalg.solve(B.todense(), [1, 2])
)
@parametrize_square_sparrays
@pytest.mark.parametrize(
"solver",
["bicg", "bicgstab", "cg", "cgs", "gmres", "lgmres", "minres", "qmr",
"gcrotmk", "tfqmr"]
)
def test_solvers(B, solver):
if solver == "minres":
kwargs = {}
else:
kwargs = {'atol': 1e-5}
x, info = getattr(spla, solver)(B, np.array([1, 2]), **kwargs)
assert info >= 0 # no errors, even if perhaps did not converge fully
npt.assert_allclose(x, [1, 1], atol=1e-1)
@parametrize_sparrays
@pytest.mark.parametrize(
"solver",
["lsqr", "lsmr"]
)
def test_lstsqr(A, solver):
x, *_ = getattr(spla, solver)(A, [1, 2, 3])
npt.assert_allclose(A @ x, [1, 2, 3])
@parametrize_eig_sparrays
def test_eigs(X):
e, v = spla.eigs(X, k=1)
npt.assert_allclose(
X @ v,
e[0] * v
)
@parametrize_eig_sparrays
def test_eigsh(X):
X = X + X.T
e, v = spla.eigsh(X, k=1)
npt.assert_allclose(
X @ v,
e[0] * v
)
@parametrize_eig_sparrays
def test_svds(X):
u, s, vh = spla.svds(X, k=3)
u2, s2, vh2 = np.linalg.svd(X.todense())
s = np.sort(s)
s2 = np.sort(s2[:3])
npt.assert_allclose(s, s2, atol=1e-3)
def test_splu():
X = scipy.sparse.csc_array([
[1, 0, 0, 0],
[2, 1, 0, 0],
[3, 2, 1, 0],
[4, 3, 2, 1],
])
LU = spla.splu(X)
npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0])
def test_spilu():
X = scipy.sparse.csc_array([
[1, 0, 0, 0],
[2, 1, 0, 0],
[3, 2, 1, 0],
[4, 3, 2, 1],
])
LU = spla.spilu(X)
npt.assert_allclose(LU.solve(np.array([1, 2, 3, 4])), [1, 0, 0, 0])
@parametrize_sparrays
def test_power_operator(A):
# https://github.com/scipy/scipy/issues/15948
npt.assert_equal((A**2).todense(), (A.todense())**2)
| bsd-3-clause | adca84dbb17b08ce332747a56f71cf80 | 21.064897 | 78 | 0.590909 | 2.834407 | false | true | false | false |
scipy/scipy | scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py | 5 | 24890 | import time
import numpy as np
from scipy.sparse.linalg import LinearOperator
from .._differentiable_functions import VectorFunction
from .._constraints import (
NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
from .._hessian_update_strategy import BFGS
from .._optimize import OptimizeResult
from .._differentiable_functions import ScalarFunction
from .equality_constrained_sqp import equality_constrained_sqp
from .canonical_constraint import (CanonicalConstraint,
initial_constraints_as_canonical)
from .tr_interior_point import tr_interior_point
from .report import BasicReport, SQPReport, IPReport
TERMINATION_MESSAGES = {
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`xtol` termination condition is satisfied.",
3: "`callback` function requested termination."
}
class HessianLinearOperator:
"""Build LinearOperator from hessp"""
def __init__(self, hessp, n):
self.hessp = hessp
self.n = n
def __call__(self, x, *args):
def matvec(p):
return self.hessp(x, p, *args)
return LinearOperator((self.n, self.n), matvec=matvec)
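# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# HessianLinearOperator turns a hessp callback into a LinearOperator factory:
# calling it at a point x yields an operator whose matvec applies hessp. The
# quadratic below (Hessian 2*I) is only an illustration.
def _hessian_linear_operator_example():
    def hessp(x, p):
        return 2.0 * p
    H = HessianLinearOperator(hessp, 3)(np.zeros(3))
    assert np.allclose(H.matvec(np.array([1.0, 2.0, 3.0])), [2.0, 4.0, 6.0])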
class LagrangianHessian:
"""The Hessian of the Lagrangian as LinearOperator.
The Lagrangian is computed as the objective function plus all the
constraints multiplied with some numbers (Lagrange multipliers).
"""
def __init__(self, n, objective_hess, constraints_hess):
self.n = n
self.objective_hess = objective_hess
self.constraints_hess = constraints_hess
def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
H_objective = self.objective_hess(x)
H_constraints = self.constraints_hess(x, v_eq, v_ineq)
def matvec(p):
return H_objective.dot(p) + H_constraints.dot(p)
return LinearOperator((self.n, self.n), matvec)
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
start_time, tr_radius, constr_penalty, cg_info):
state.nit += 1
state.nfev = objective.nfev
state.njev = objective.ngev
state.nhev = objective.nhev
state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
if not last_iteration_failed:
state.x = x
state.fun = objective.f
state.grad = objective.g
state.v = [c.fun.v for c in prepared_constraints]
state.constr = [c.fun.f for c in prepared_constraints]
state.jac = [c.fun.J for c in prepared_constraints]
# Compute Lagrangian Gradient
state.lagrangian_grad = np.copy(state.grad)
for c in prepared_constraints:
state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
# Compute maximum constraint violation
state.constr_violation = 0
for i in range(len(prepared_constraints)):
lb, ub = prepared_constraints[i].bounds
c = state.constr[i]
state.constr_violation = np.max([state.constr_violation,
np.max(lb - c),
np.max(c - ub)])
state.execution_time = time.time() - start_time
state.tr_radius = tr_radius
state.constr_penalty = constr_penalty
state.cg_niter += cg_info["niter"]
state.cg_stop_cond = cg_info["stop_cond"]
return state
def update_state_ip(state, x, last_iteration_failed, objective,
prepared_constraints, start_time,
tr_radius, constr_penalty, cg_info,
barrier_parameter, barrier_tolerance):
state = update_state_sqp(state, x, last_iteration_failed, objective,
prepared_constraints, start_time, tr_radius,
constr_penalty, cg_info)
state.barrier_parameter = barrier_parameter
state.barrier_tolerance = barrier_tolerance
return state
def _minimize_trustregion_constr(fun, x0, args, grad,
hess, hessp, bounds, constraints,
xtol=1e-8, gtol=1e-8,
barrier_tol=1e-8,
sparse_jacobian=None,
callback=None, maxiter=1000,
verbose=0, finite_diff_rel_step=None,
initial_constr_penalty=1.0, initial_tr_radius=1.0,
initial_barrier_parameter=0.1,
initial_barrier_tolerance=0.1,
factorization_method=None,
disp=False):
"""Minimize a scalar function subject to constraints.
Parameters
----------
gtol : float, optional
Tolerance for termination by the norm of the Lagrangian gradient.
The algorithm will terminate when both the infinity norm (i.e., max
abs value) of the Lagrangian gradient and the constraint violation
are smaller than ``gtol``. Default is 1e-8.
xtol : float, optional
Tolerance for termination by the change of the independent variable.
The algorithm will terminate when ``tr_radius < xtol``, where
``tr_radius`` is the radius of the trust region used in the algorithm.
Default is 1e-8.
barrier_tol : float, optional
Threshold on the barrier parameter for the algorithm termination.
When inequality constraints are present, the algorithm will terminate
only when the barrier parameter is less than `barrier_tol`.
Default is 1e-8.
sparse_jacobian : {bool, None}, optional
Determines how to represent Jacobians of the constraints. If bool,
then Jacobians of all the constraints will be converted to the
corresponding format. If None (default), then Jacobians won't be
converted, but the algorithm can proceed only if they all have the
same format.
initial_tr_radius: float, optional
Initial trust radius. The trust radius gives the maximum distance
between solution points in consecutive iterations. It reflects the
trust the algorithm puts in the local approximation of the optimization
problem. For an accurate local approximation the trust-region should be
large and for an approximation valid only close to the current point it
should be a small one. The trust radius is automatically updated throughout
the optimization process, with ``initial_tr_radius`` being its initial value.
Default is 1 (recommended in [1]_, p. 19).
initial_constr_penalty : float, optional
Initial constraints penalty parameter. The penalty parameter is used for
balancing the requirements of decreasing the objective function
and satisfying the constraints. It is used for defining the merit function:
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
the constraints. The merit function is used for accepting or rejecting
trial points and ``constr_penalty`` weights the two conflicting goals
of reducing objective function and constraints. The penalty is automatically
updated throughout the optimization process, with
``initial_constr_penalty`` being its initial value. Default is 1
(recommended in [1]_, p 19).
initial_barrier_parameter, initial_barrier_tolerance: float, optional
Initial barrier parameter and initial tolerance for the barrier subproblem.
Both are used only when inequality constraints are present. For dealing with
optimization problems ``min_x f(x)`` subject to inequality constraints
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
is solved for decreasing values of ``barrier_parameter`` and with decreasing
tolerances for the termination, starting with ``initial_barrier_parameter``
for the barrier parameter and ``initial_barrier_tolerance`` for the
barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
with the same prefactor.
factorization_method : string or None, optional
Method to factorize the Jacobian of the constraints. Use None (default)
for the auto selection or one of:
- 'NormalEquation' (requires scikit-sparse)
- 'AugmentedSystem'
- 'QRFactorization'
- 'SVDFactorization'
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
with sparse constraints. The projections required by the algorithm
will be computed using, respectively, the normal equation and the
augmented system approaches explained in [1]_. 'NormalEquation'
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
performs the LU factorization of an augmented system. They usually
provide similar results. 'AugmentedSystem' is used by default for
sparse matrices.
The methods 'QRFactorization' and 'SVDFactorization' can be used
only with dense constraints. They compute the required projections
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
method can cope with Jacobian matrices with deficient row rank and will
be used whenever other factorization methods fail (which may imply the
conversion of sparse matrices to a dense format when required).
By default, 'QRFactorization' is used for dense matrices.
finite_diff_rel_step : None or array_like, optional
Relative step size for the finite difference approximation.
maxiter : int, optional
Maximum number of algorithm iterations. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
* 3 : display progress during iterations (more complete report).
disp : bool, optional
If True, then `verbose` will be set to 1 if it was 0. Default is False.
Returns
-------
`OptimizeResult` with the fields documented below. Note the following:
1. All values corresponding to the constraints are ordered as they
were passed to the solver. And values corresponding to `bounds`
constraints are put *after* other constraints.
2. All numbers of function, Jacobian or Hessian evaluations correspond
to numbers of actual Python function calls. It means, for example,
that if a Jacobian is estimated by finite differences, then the
number of Jacobian evaluations will be zero and the number of
function evaluations will be incremented by all calls during the
finite difference estimation.
x : ndarray, shape (n,)
Solution found.
optimality : float
Infinity norm of the Lagrangian gradient at the solution.
constr_violation : float
Maximum constraint violation at the solution.
fun : float
Objective function at the solution.
grad : ndarray, shape (n,)
Gradient of the objective function at the solution.
lagrangian_grad : ndarray, shape (n,)
Gradient of the Lagrangian function at the solution.
nit : int
Total number of iterations.
nfev : integer
Number of the objective function evaluations.
njev : integer
Number of the objective function gradient evaluations.
nhev : integer
Number of the objective function Hessian evaluations.
cg_niter : int
Total number of the conjugate gradient method iterations.
method : {'equality_constrained_sqp', 'tr_interior_point'}
Optimization method used.
constr : list of ndarray
List of constraint values at the solution.
jac : list of {ndarray, sparse matrix}
List of the Jacobian matrices of the constraints at the solution.
v : list of ndarray
List of the Lagrange multipliers for the constraints at the solution.
For an inequality constraint a positive multiplier means that the upper
bound is active, a negative multiplier means that the lower bound is
active and if a multiplier is zero it means the constraint is not
active.
constr_nfev : list of int
Number of constraint evaluations for each of the constraints.
constr_njev : list of int
Number of Jacobian matrix evaluations for each of the constraints.
constr_nhev : list of int
Number of Hessian evaluations for each of the constraints.
tr_radius : float
Radius of the trust region at the last iteration.
constr_penalty : float
Penalty parameter at the last iteration, see `initial_constr_penalty`.
barrier_tolerance : float
Tolerance for the barrier subproblem at the last iteration.
Only for problems with inequality constraints.
barrier_parameter : float
Barrier parameter at the last iteration. Only for problems
with inequality constraints.
execution_time : float
Total execution time.
message : str
Termination message.
status : {0, 1, 2, 3}
Termination status:
* 0 : The maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `xtol` termination condition is satisfied.
* 3 : `callback` function requested termination.
cg_stop_cond : int
Reason for CG subproblem termination at the last iteration:
* 0 : CG subproblem not evaluated.
* 1 : Iteration limit was reached.
* 2 : Reached the trust-region boundary.
* 3 : Negative curvature detected.
* 4 : Tolerance was satisfied.
References
----------
.. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
Trust region methods. 2000. Siam. pp. 19.
"""
x0 = np.atleast_1d(x0).astype(float)
n_vars = np.size(x0)
if hess is None:
if callable(hessp):
hess = HessianLinearOperator(hessp, n_vars)
else:
hess = BFGS()
if disp and verbose == 0:
verbose = 1
if bounds is not None:
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
bounds.keep_feasible, n_vars)
else:
finite_diff_bounds = (-np.inf, np.inf)
# Define Objective Function
objective = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, finite_diff_bounds)
# Put constraints in list format when needed.
if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
constraints = [constraints]
# Prepare constraints.
prepared_constraints = [
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
for c in constraints]
# Check that all constraints are either sparse or dense.
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
if 0 < n_sparse < len(prepared_constraints):
raise ValueError("All constraints must have the same kind of the "
"Jacobian --- either all sparse or all dense. "
"You can set the sparsity globally by setting "
"`sparse_jacobian` to either True of False.")
if prepared_constraints:
sparse_jacobian = n_sparse > 0
if bounds is not None:
if sparse_jacobian is None:
sparse_jacobian = True
prepared_constraints.append(PreparedConstraint(bounds, x0,
sparse_jacobian))
# Concatenate initial constraints to the canonical form.
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
n_vars, prepared_constraints, sparse_jacobian)
# Prepare all canonical constraints and concatenate it into one.
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
for c in prepared_constraints]
if len(canonical_all) == 0:
canonical = CanonicalConstraint.empty(n_vars)
elif len(canonical_all) == 1:
canonical = canonical_all[0]
else:
canonical = CanonicalConstraint.concatenate(canonical_all,
sparse_jacobian)
# Generate the Hessian of the Lagrangian.
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
# Choose appropriate method
if canonical.n_ineq == 0:
method = 'equality_constrained_sqp'
else:
method = 'tr_interior_point'
# Construct OptimizeResult
state = OptimizeResult(
nit=0, nfev=0, njev=0, nhev=0,
cg_niter=0, cg_stop_cond=0,
fun=objective.f, grad=objective.g,
lagrangian_grad=np.copy(objective.g),
constr=[c.fun.f for c in prepared_constraints],
jac=[c.fun.J for c in prepared_constraints],
constr_nfev=[0 for c in prepared_constraints],
constr_njev=[0 for c in prepared_constraints],
constr_nhev=[0 for c in prepared_constraints],
v=[c.fun.v for c in prepared_constraints],
method=method)
# Start counting
start_time = time.time()
# Define stop criteria
if method == 'equality_constrained_sqp':
def stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
tr_radius, constr_penalty, cg_info):
state = update_state_sqp(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
SQPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.cg_stop_cond)
state.status = None
state.niter = state.nit # Alias for callback (backward-compatibility)
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif state.tr_radius < xtol:
state.status = 2
elif state.nit >= maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
elif method == 'tr_interior_point':
def stop_criteria(state, x, last_iteration_failed, tr_radius,
constr_penalty, cg_info, barrier_parameter,
barrier_tolerance):
state = update_state_ip(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info, barrier_parameter, barrier_tolerance)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
IPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.barrier_parameter,
state.cg_stop_cond)
state.status = None
state.niter = state.nit # Alias for callback (backward compatibility)
if callback is not None and callback(np.copy(state.x), state):
state.status = 3
elif state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif (state.tr_radius < xtol
and state.barrier_parameter < barrier_tol):
state.status = 2
elif state.nit >= maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
if verbose == 2:
BasicReport.print_header()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_header()
elif method == 'tr_interior_point':
IPReport.print_header()
# Call inferior function to do the optimization
if method == 'equality_constrained_sqp':
def fun_and_constr(x):
f = objective.fun(x)
c_eq, _ = canonical.fun(x)
return f, c_eq
def grad_and_jac(x):
g = objective.grad(x)
J_eq, _ = canonical.jac(x)
return g, J_eq
_, result = equality_constrained_sqp(
fun_and_constr, grad_and_jac, lagrangian_hess,
x0, objective.f, objective.g,
c_eq0, J_eq0,
stop_criteria, state,
initial_constr_penalty, initial_tr_radius,
factorization_method)
elif method == 'tr_interior_point':
_, result = tr_interior_point(
objective.fun, objective.grad, lagrangian_hess,
n_vars, canonical.n_ineq, canonical.n_eq,
canonical.fun, canonical.jac,
x0, objective.f, objective.g,
c_ineq0, J_ineq0, c_eq0, J_eq0,
stop_criteria,
canonical.keep_feasible,
xtol, state, initial_barrier_parameter,
initial_barrier_tolerance,
initial_constr_penalty, initial_tr_radius,
factorization_method)
# Status 3 occurs when the callback function requests termination,
# this is assumed to not be a success.
result.success = True if result.status in (1, 2) else False
result.message = TERMINATION_MESSAGES[result.status]
# Alias (for backward compatibility with 1.1.0)
result.niter = result.nit
if verbose == 2:
BasicReport.print_footer()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_footer()
elif method == 'tr_interior_point':
IPReport.print_footer()
if verbose >= 1:
print(result.message)
print("Number of iterations: {}, function evaluations: {}, "
"CG iterations: {}, optimality: {:.2e}, "
"constraint violation: {:.2e}, execution time: {:4.2} s."
.format(result.nit, result.nfev, result.cg_niter,
result.optimality, result.constr_violation,
result.execution_time))
return result
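# --- Editor's illustrative sketch (hypothetical helper, not part of the original source) ---
# This solver is normally reached through scipy.optimize.minimize with
# method='trust-constr'. The toy problem below (minimize x**2 + y**2 subject
# to x + y >= 1, solution (0.5, 0.5)) only illustrates the calling convention.
def _trust_constr_usage_example():
    from scipy.optimize import minimize, LinearConstraint
    con = LinearConstraint([[1.0, 1.0]], lb=1.0, ub=np.inf)
    res = minimize(lambda z: z[0] ** 2 + z[1] ** 2, x0=[2.0, 0.0],
                   method='trust-constr', constraints=[con])
    assert np.allclose(res.x, [0.5, 0.5], atol=1e-4)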
| bsd-3-clause | 1775531a513f3bafb362bad6bb1a860a | 44.669725 | 86 | 0.595661 | 4.435929 | false | false | false | false |
scipy/scipy | scipy/special/_generate_pyx.py | 10 | 52332 | """
python _generate_pyx.py
Generate Ufunc definition source files for scipy.special. Produces
files '_ufuncs.c' and '_ufuncs_cxx.c' by first producing Cython.
This will generate both calls to PyUFunc_FromFuncAndData and the
required ufunc inner loops.
The function signatures are contained in 'functions.json'; the syntax
for a function signature is
<function>: <name> ':' <input> '*' <output>
'->' <retval> '*' <ignored_retval>
<input>: <typecode>*
<output>: <typecode>*
<retval>: <typecode>?
<ignored_retval>: <typecode>?
<headers>: <header_name> [',' <header_name>]*
The input parameter types are denoted by single character type
codes, according to
'f': 'float'
'd': 'double'
'g': 'long double'
'F': 'float complex'
'D': 'double complex'
'G': 'long double complex'
'i': 'int'
'l': 'long'
'v': 'void'
If multiple kernel functions are given for a single ufunc, the one
which is used is determined by the standard ufunc mechanism. Kernel
functions that are listed first are also matched first against the
ufunc input types, so functions listed earlier take precedence.
In addition, versions with cast variables, such as d->f, D->F and
i->d are automatically generated.
There should be either a single header that contains all of the kernel
functions listed, or there should be one header for each kernel
function. Cython pxd files are allowed in addition to .h files.
Cython functions may use fused types, but the names in the list
should be the specialized ones, such as 'somefunc[float]'.
Function coming from C++ should have ``++`` appended to the name of
the header.
Floating-point exceptions inside these Ufuncs are converted to
special function errors --- which are separately controlled by the
user, and off by default, as they are usually not especially useful
for the user.
The C++ module
--------------
In addition to ``_ufuncs`` module, a second module ``_ufuncs_cxx`` is
generated. This module only exports function pointers that are to be
used when constructing some of the ufuncs in ``_ufuncs``. The function
pointers are exported via Cython's standard mechanism.
This mainly avoids build issues --- Python distutils has no way to
figure out what to do if you want to link both C++ and Fortran code in
the same shared library.
"""
# -----------------------------------------------------------------------------
# Extra code
# -----------------------------------------------------------------------------
UFUNCS_EXTRA_CODE_COMMON = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
from libc.math cimport NAN
include "_ufuncs_extra_code_common.pxi"
"""
UFUNCS_EXTRA_CODE = """\
include "_ufuncs_extra_code.pxi"
"""
UFUNCS_EXTRA_CODE_BOTTOM = """\
#
# Aliases
#
jn = jv
"""
CYTHON_SPECIAL_PXD = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
ctypedef fused number_t:
double complex
double
cpdef number_t spherical_jn(long n, number_t z, bint derivative=*) nogil
cpdef number_t spherical_yn(long n, number_t z, bint derivative=*) nogil
cpdef number_t spherical_in(long n, number_t z, bint derivative=*) nogil
cpdef number_t spherical_kn(long n, number_t z, bint derivative=*) nogil
"""
CYTHON_SPECIAL_PYX = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
\"\"\"
.. highlight:: cython
Cython API for special functions
================================
Scalar, typed versions of many of the functions in ``scipy.special``
can be accessed directly from Cython; the complete list is given
below. Functions are overloaded using Cython fused types so their
names match their Python counterparts. The module uses the following
conventions:
- If a function's Python counterpart returns multiple values, then the
function returns its outputs via pointers in the final arguments.
- If a function's Python counterpart returns a single value, then the
function's output is returned directly.
The module is usable from Cython via::
cimport scipy.special.cython_special
Error handling
--------------
Functions can indicate an error by returning ``nan``; however they
cannot emit warnings like their counterparts in ``scipy.special``.
Available functions
-------------------
FUNCLIST
Custom functions
----------------
Some functions in ``scipy.special`` which are not ufuncs have custom
Cython wrappers.
Spherical Bessel functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
The optional ``derivative`` boolean argument is replaced with an
optional Cython ``bint``, leading to the following signatures.
- :py:func:`~scipy.special.spherical_jn`::
double complex spherical_jn(long, double complex)
double complex spherical_jn(long, double complex, bint)
double spherical_jn(long, double)
double spherical_jn(long, double, bint)
- :py:func:`~scipy.special.spherical_yn`::
double complex spherical_yn(long, double complex)
double complex spherical_yn(long, double complex, bint)
double spherical_yn(long, double)
double spherical_yn(long, double, bint)
- :py:func:`~scipy.special.spherical_in`::
double complex spherical_in(long, double complex)
double complex spherical_in(long, double complex, bint)
double spherical_in(long, double)
double spherical_in(long, double, bint)
- :py:func:`~scipy.special.spherical_kn`::
double complex spherical_kn(long, double complex)
double complex spherical_kn(long, double complex, bint)
double spherical_kn(long, double)
double spherical_kn(long, double, bint)
\"\"\"
from libc.math cimport NAN
include "_cython_special.pxi"
include "_cython_special_custom.pxi"
"""
STUBS = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
from typing import Any, Dict
import numpy as np
__all__ = [
'geterr',
'seterr',
'errstate',
{ALL}
]
def geterr() -> Dict[str, str]: ...
def seterr(**kwargs: str) -> Dict[str, str]: ...
class errstate:
def __init__(self, **kargs: str) -> None: ...
def __enter__(self) -> None: ...
def __exit__(
self,
exc_type: Any, # Unused
exc_value: Any, # Unused
traceback: Any, # Unused
) -> None: ...
{STUBS}
"""
# -----------------------------------------------------------------------------
# Code generation
# -----------------------------------------------------------------------------
import itertools
import json
import os
from stat import ST_MTIME
import argparse
import re
import textwrap
from typing import List
import numpy
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
add_newdocs = __import__('_add_newdocs')
CY_TYPES = {
'f': 'float',
'd': 'double',
'g': 'long double',
'F': 'float complex',
'D': 'double complex',
'G': 'long double complex',
'i': 'int',
'l': 'long',
'v': 'void',
}
C_TYPES = {
'f': 'npy_float',
'd': 'npy_double',
'g': 'npy_longdouble',
'F': 'npy_cfloat',
'D': 'npy_cdouble',
'G': 'npy_clongdouble',
'i': 'npy_int',
'l': 'npy_long',
'v': 'void',
}
TYPE_NAMES = {
'f': 'NPY_FLOAT',
'd': 'NPY_DOUBLE',
'g': 'NPY_LONGDOUBLE',
'F': 'NPY_CFLOAT',
'D': 'NPY_CDOUBLE',
'G': 'NPY_CLONGDOUBLE',
'i': 'NPY_INT',
'l': 'NPY_LONG',
}
CYTHON_SPECIAL_BENCHFUNCS = {
'airy': ['d*dddd', 'D*DDDD'],
'beta': ['dd'],
'erf': ['d', 'D'],
'exprel': ['d'],
'gamma': ['d', 'D'],
'jv': ['dd', 'dD'],
'loggamma': ['D'],
'logit': ['d'],
'psi': ['d', 'D'],
}
def underscore(arg):
return arg.replace(" ", "_")
def cast_order(c):
return ['ilfdgFDG'.index(x) for x in c]
# These downcasts will cause the function to return NaNs, unless the
# values happen to coincide exactly.
DANGEROUS_DOWNCAST = set([
('F', 'i'), ('F', 'l'), ('F', 'f'), ('F', 'd'), ('F', 'g'),
('D', 'i'), ('D', 'l'), ('D', 'f'), ('D', 'd'), ('D', 'g'),
('G', 'i'), ('G', 'l'), ('G', 'f'), ('G', 'd'), ('G', 'g'),
('f', 'i'), ('f', 'l'),
('d', 'i'), ('d', 'l'),
('g', 'i'), ('g', 'l'),
('l', 'i'),
])
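# For example, the ('D', 'd') entry covers a ufunc that accepts double complex
# but calls a double-valued kernel: the generated loop only performs the call
# when the cast round-trips (i.e. the imaginary part is exactly zero), and
# otherwise reports a domain error and writes the NaN value defined below.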
NAN_VALUE = {
'f': 'NAN',
'd': 'NAN',
'g': 'NAN',
'F': 'NAN',
'D': 'NAN',
'G': 'NAN',
'i': '0xbad0bad0',
'l': '0xbad0bad0',
}
def generate_loop(func_inputs, func_outputs, func_retval,
ufunc_inputs, ufunc_outputs):
"""
Generate a UFunc loop function that calls a function given as its
data parameter with the specified input and output arguments and
return value.
This function can be passed to PyUFunc_FromFuncAndData.
Parameters
----------
func_inputs, func_outputs, func_retval : str
Signature of the function to call, given as type codes of the
input, output and return value arguments. These 1-character
codes are given according to the CY_TYPES and TYPE_NAMES
lists above.
The corresponding C function signature to be called is:
retval func(intype1 iv1, intype2 iv2, ..., outtype1 *ov1, ...);
If len(ufunc_outputs) == len(func_outputs)+1, the return value
is treated as the first output argument. Otherwise, the return
value is ignored.
ufunc_inputs, ufunc_outputs : str
Ufunc input and output signature.
This does not have to exactly match the function signature,
as long as the type casts work out on the C level.
Returns
-------
loop_name
Name of the generated loop function.
loop_body
Generated C code for the loop.
"""
if len(func_inputs) != len(ufunc_inputs):
raise ValueError("Function and ufunc have different number of inputs")
if len(func_outputs) != len(ufunc_outputs) and not (
func_retval != "v" and len(func_outputs)+1 == len(ufunc_outputs)):
raise ValueError("Function retval and ufunc outputs don't match")
name = "loop_%s_%s_%s_As_%s_%s" % (
func_retval, func_inputs, func_outputs, ufunc_inputs, ufunc_outputs
)
body = "cdef void %s(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) nogil:\n" % name
body += " cdef np.npy_intp i, n = dims[0]\n"
body += " cdef void *func = (<void**>data)[0]\n"
body += " cdef char *func_name = <char*>(<void**>data)[1]\n"
for j in range(len(ufunc_inputs)):
body += " cdef char *ip%d = args[%d]\n" % (j, j)
for j in range(len(ufunc_outputs)):
body += " cdef char *op%d = args[%d]\n" % (j, j + len(ufunc_inputs))
ftypes = []
fvars = []
outtypecodes = []
for j in range(len(func_inputs)):
ftypes.append(CY_TYPES[func_inputs[j]])
fvars.append("<%s>(<%s*>ip%d)[0]" % (
CY_TYPES[func_inputs[j]],
CY_TYPES[ufunc_inputs[j]], j))
if len(func_outputs)+1 == len(ufunc_outputs):
func_joff = 1
outtypecodes.append(func_retval)
body += " cdef %s ov0\n" % (CY_TYPES[func_retval],)
else:
func_joff = 0
for j, outtype in enumerate(func_outputs):
body += " cdef %s ov%d\n" % (CY_TYPES[outtype], j+func_joff)
ftypes.append("%s *" % CY_TYPES[outtype])
fvars.append("&ov%d" % (j+func_joff))
outtypecodes.append(outtype)
body += " for i in range(n):\n"
if len(func_outputs)+1 == len(ufunc_outputs):
rv = "ov0 = "
else:
rv = ""
funcall = " %s(<%s(*)(%s) nogil>func)(%s)\n" % (
rv, CY_TYPES[func_retval], ", ".join(ftypes), ", ".join(fvars))
# Cast-check inputs and call function
input_checks = []
for j in range(len(func_inputs)):
if (ufunc_inputs[j], func_inputs[j]) in DANGEROUS_DOWNCAST:
chk = "<%s>(<%s*>ip%d)[0] == (<%s*>ip%d)[0]" % (
CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j,
CY_TYPES[ufunc_inputs[j]], j)
input_checks.append(chk)
if input_checks:
body += " if %s:\n" % (" and ".join(input_checks))
body += " " + funcall
body += " else:\n"
body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid input argument\")\n"
for j, outtype in enumerate(outtypecodes):
body += " ov%d = <%s>%s\n" % (
j, CY_TYPES[outtype], NAN_VALUE[outtype])
else:
body += funcall
# Assign and cast-check output values
for j, (outtype, fouttype) in enumerate(zip(ufunc_outputs, outtypecodes)):
if (fouttype, outtype) in DANGEROUS_DOWNCAST:
body += " if ov%d == <%s>ov%d:\n" % (j, CY_TYPES[outtype], j)
body += " (<%s *>op%d)[0] = <%s>ov%d\n" % (
CY_TYPES[outtype], j, CY_TYPES[outtype], j)
body += " else:\n"
body += " sf_error.error(func_name, sf_error.DOMAIN, \"invalid output\")\n"
body += " (<%s *>op%d)[0] = <%s>%s\n" % (
CY_TYPES[outtype], j, CY_TYPES[outtype], NAN_VALUE[outtype])
else:
body += " (<%s *>op%d)[0] = <%s>ov%d\n" % (
CY_TYPES[outtype], j, CY_TYPES[outtype], j)
for j in range(len(ufunc_inputs)):
body += " ip%d += steps[%d]\n" % (j, j)
for j in range(len(ufunc_outputs)):
body += " op%d += steps[%d]\n" % (j, j + len(ufunc_inputs))
body += " sf_error.check_fpe(func_name)\n"
return name, body
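# Illustrative sketch (not part of the generator; never called by it): how
# generate_loop would be used to wrap a hypothetical C kernel
# ``double f(double)`` exposed as a 'd->d' ufunc.
def _example_generate_loop():
    # The resulting name is "loop_d_d__As_d_d"; ``body`` holds the Cython
    # inner-loop source that can be registered via PyUFunc_FromFuncAndData.
    name, body = generate_loop("d", "", "d", "d", "d")
    return name, body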
def generate_fused_type(codes):
"""
Generate name of and cython code for a fused type.
Parameters
----------
codes : str
Valid inputs to CY_TYPES (i.e. f, d, g, ...).
"""
cytypes = [CY_TYPES[x] for x in codes]
name = codes + "_number_t"
declaration = ["ctypedef fused " + name + ":"]
for cytype in cytypes:
declaration.append(" " + cytype)
declaration = "\n".join(declaration)
return name, declaration
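# Illustrative sketch (not part of the generator): generate_fused_type("df")
# returns the name "df_number_t" together with its declaration
#     ctypedef fused df_number_t:
#         double
#         float
def _example_generate_fused_type():
    name, declaration = generate_fused_type("df")
    return name, declaration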
def generate_bench(name, codes):
tab = " "*4
top, middle, end = [], [], []
tmp = codes.split("*")
if len(tmp) > 1:
incodes = tmp[0]
outcodes = tmp[1]
else:
incodes = tmp[0]
outcodes = ""
inargs, inargs_and_types = [], []
for n, code in enumerate(incodes):
arg = "x{}".format(n)
inargs.append(arg)
inargs_and_types.append("{} {}".format(CY_TYPES[code], arg))
line = "def {{}}(int N, {}):".format(", ".join(inargs_and_types))
top.append(line)
top.append(tab + "cdef int n")
outargs = []
for n, code in enumerate(outcodes):
arg = "y{}".format(n)
outargs.append("&{}".format(arg))
line = "cdef {} {}".format(CY_TYPES[code], arg)
middle.append(tab + line)
end.append(tab + "for n in range(N):")
end.append(2*tab + "{}({})")
pyfunc = "_bench_{}_{}_{}".format(name, incodes, "py")
cyfunc = "_bench_{}_{}_{}".format(name, incodes, "cy")
pytemplate = "\n".join(top + end)
cytemplate = "\n".join(top + middle + end)
pybench = pytemplate.format(pyfunc, "_ufuncs." + name, ", ".join(inargs))
cybench = cytemplate.format(cyfunc, name, ", ".join(inargs + outargs))
return pybench, cybench
def generate_doc(name, specs):
tab = " "*4
doc = ["- :py:func:`~scipy.special.{}`::\n".format(name)]
for spec in specs:
incodes, outcodes = spec.split("->")
incodes = incodes.split("*")
intypes = [CY_TYPES[x] for x in incodes[0]]
if len(incodes) > 1:
types = [f"{CY_TYPES[x]} *" for x in incodes[1]]
intypes.extend(types)
outtype = CY_TYPES[outcodes]
line = "{} {}({})".format(outtype, name, ", ".join(intypes))
doc.append(2*tab + line)
doc[-1] = "{}\n".format(doc[-1])
doc = "\n".join(doc)
return doc
def npy_cdouble_from_double_complex(var):
"""Cast a Cython double complex to a NumPy cdouble."""
res = "_complexstuff.npy_cdouble_from_double_complex({})".format(var)
return res
def double_complex_from_npy_cdouble(var):
"""Cast a NumPy cdouble to a Cython double complex."""
res = "_complexstuff.double_complex_from_npy_cdouble({})".format(var)
return res
def iter_variants(inputs, outputs):
"""
Generate variants of UFunc signatures, by changing variable types,
within the limitation that the corresponding C types casts still
work out.
This does not generate all possibilities, just the ones required
for the ufunc to work properly with the most common data types.
Parameters
----------
inputs, outputs : str
UFunc input and output signature strings
Yields
------
new_input, new_output : str
Modified input and output strings.
Also the original input/output pair is yielded.
"""
maps = [
# always use long instead of int (more common type on 64-bit)
('i', 'l'),
]
# float32-preserving signatures
if not ('i' in inputs or 'l' in inputs):
# Don't add float32 versions of ufuncs with integer arguments, as this
# can lead to incorrect dtype selection if the integer arguments are
# arrays, but float arguments are scalars.
# For instance sph_harm(0,[0],0,0).dtype == complex64
# This may be a NumPy bug, but we need to work around it.
# cf. gh-4895, https://github.com/numpy/numpy/issues/5895
maps = maps + [(a + 'dD', b + 'fF') for a, b in maps]
# do the replacements
for src, dst in maps:
new_inputs = inputs
new_outputs = outputs
for a, b in zip(src, dst):
new_inputs = new_inputs.replace(a, b)
new_outputs = new_outputs.replace(a, b)
yield new_inputs, new_outputs
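# Illustrative self-check sketch (not part of the generator): integer inputs
# are rewritten to long, and signatures without integer arguments also get a
# float32-preserving variant.
def _example_iter_variants():
    assert list(iter_variants("id", "d")) == [("ld", "d")]
    assert list(iter_variants("dd", "d")) == [("dd", "d"), ("ff", "f")]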
class Func:
"""
Base class for Ufunc and FusedFunc.
"""
def __init__(self, name, signatures):
self.name = name
self.signatures = []
self.function_name_overrides = {}
for header in signatures.keys():
for name, sig in signatures[header].items():
inarg, outarg, ret = self._parse_signature(sig)
self.signatures.append((name, inarg, outarg, ret, header))
def _parse_signature(self, sig):
m = re.match(r"\s*([fdgFDGil]*)\s*\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig)
if m:
inarg, outarg, ret = [x.strip() for x in m.groups()]
if ret.count('*') > 1:
raise ValueError("{}: Invalid signature: {}".format(self.name, sig))
return inarg, outarg, ret
m = re.match(r"\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig)
if m:
inarg, ret = [x.strip() for x in m.groups()]
return inarg, "", ret
raise ValueError("{}: Invalid signature: {}".format(self.name, sig))
def get_prototypes(self, nptypes_for_h=False):
prototypes = []
for func_name, inarg, outarg, ret, header in self.signatures:
ret = ret.replace('*', '')
c_args = ([C_TYPES[x] for x in inarg]
+ [C_TYPES[x] + ' *' for x in outarg])
cy_args = ([CY_TYPES[x] for x in inarg]
+ [CY_TYPES[x] + ' *' for x in outarg])
c_proto = "%s (*)(%s)" % (C_TYPES[ret], ", ".join(c_args))
if header.endswith("h") and nptypes_for_h:
cy_proto = c_proto + "nogil"
else:
cy_proto = "%s (*)(%s) nogil" % (CY_TYPES[ret], ", ".join(cy_args))
prototypes.append((func_name, c_proto, cy_proto, header))
return prototypes
def cython_func_name(self, c_name, specialized=False, prefix="_func_",
override=True):
# act on function name overrides
if override and c_name in self.function_name_overrides:
c_name = self.function_name_overrides[c_name]
prefix = ""
# support fused types
m = re.match(r'^(.*?)(\[.*\])$', c_name)
if m:
c_base_name, fused_part = m.groups()
else:
c_base_name, fused_part = c_name, ""
if specialized:
return "%s%s%s" % (prefix, c_base_name, fused_part.replace(' ', '_'))
else:
return "%s%s" % (prefix, c_base_name,)
class Ufunc(Func):
"""
Ufunc signature, restricted format suitable for special functions.
Parameters
----------
name
Name of the ufunc to create
signature
String of form 'func: fff*ff->f, func2: ddd->*i' describing
the C-level functions and types of their input arguments
and return values.
The syntax is 'function_name: inputparams*outputparams->output_retval*ignored_retval'
Attributes
----------
name : str
Python name for the Ufunc
signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name)
List of parsed signatures
doc : str
Docstring, obtained from add_newdocs
function_name_overrides : dict of str->str
Overrides for the function names in signatures
"""
def __init__(self, name, signatures):
super().__init__(name, signatures)
self.doc = add_newdocs.get(name)
if self.doc is None:
raise ValueError("No docstring for ufunc %r" % name)
self.doc = textwrap.dedent(self.doc).strip()
def _get_signatures_and_loops(self, all_loops):
inarg_num = None
outarg_num = None
seen = set()
variants = []
def add_variant(func_name, inarg, outarg, ret, inp, outp):
if inp in seen:
return
seen.add(inp)
sig = (func_name, inp, outp)
if "v" in outp:
raise ValueError("%s: void signature %r" % (self.name, sig))
if len(inp) != inarg_num or len(outp) != outarg_num:
raise ValueError("%s: signature %r does not have %d/%d input/output args" % (
self.name, sig,
inarg_num, outarg_num))
loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp)
all_loops[loop_name] = loop
variants.append((func_name, loop_name, inp, outp))
# First add base variants
for func_name, inarg, outarg, ret, header in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
ret = ret.replace('*', '')
if inarg_num is None:
inarg_num = len(inarg)
outarg_num = len(outp)
inp, outp = list(iter_variants(inarg, outp))[0]
add_variant(func_name, inarg, outarg, ret, inp, outp)
# Then the supplementary ones
for func_name, inarg, outarg, ret, header in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
ret = ret.replace('*', '')
for inp, outp in iter_variants(inarg, outp):
add_variant(func_name, inarg, outarg, ret, inp, outp)
# Then sort variants to input argument cast order
# -- the sort is stable, so functions earlier in the signature list
# are still preferred
variants.sort(key=lambda v: cast_order(v[2]))
return variants, inarg_num, outarg_num
def generate(self, all_loops):
toplevel = ""
variants, inarg_num, outarg_num = self._get_signatures_and_loops(
all_loops)
loops = []
funcs = []
types = []
for func_name, loop_name, inputs, outputs in variants:
for x in inputs:
types.append(TYPE_NAMES[x])
for x in outputs:
types.append(TYPE_NAMES[x])
loops.append(loop_name)
funcs.append(func_name)
toplevel += "cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n" % (self.name, len(loops))
toplevel += "cdef void *ufunc_%s_ptr[%d]\n" % (self.name, 2*len(funcs))
toplevel += "cdef void *ufunc_%s_data[%d]\n" % (self.name, len(funcs))
toplevel += "cdef char ufunc_%s_types[%d]\n" % (self.name, len(types))
toplevel += 'cdef char *ufunc_%s_doc = (\n "%s")\n' % (
self.name,
self.doc.replace("\\", "\\\\").replace('"', '\\"').replace('\n', '\\n\"\n "')
)
for j, function in enumerate(loops):
toplevel += "ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n" % (self.name, j, function)
for j, type in enumerate(types):
toplevel += "ufunc_%s_types[%d] = <char>%s\n" % (self.name, j, type)
for j, func in enumerate(funcs):
toplevel += "ufunc_%s_ptr[2*%d] = <void*>%s\n" % (self.name, j,
self.cython_func_name(func, specialized=True))
toplevel += "ufunc_%s_ptr[2*%d+1] = <void*>(<char*>\"%s\")\n" % (self.name, j,
self.name)
for j, func in enumerate(funcs):
toplevel += "ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n" % (
self.name, j, self.name, j)
toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, '
'ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, '
'"@", ufunc_@_doc, 0)\n' % (len(types)/(inarg_num+outarg_num),
inarg_num, outarg_num)
).replace('@', self.name)
return toplevel
class FusedFunc(Func):
"""
Generate code for a fused-type special function that can be
cimported in Cython.
"""
def __init__(self, name, signatures):
super().__init__(name, signatures)
self.doc = "See the documentation for scipy.special." + self.name
# "codes" are the keys for CY_TYPES
self.incodes, self.outcodes = self._get_codes()
self.fused_types = set()
self.intypes, infused_types = self._get_types(self.incodes)
self.fused_types.update(infused_types)
self.outtypes, outfused_types = self._get_types(self.outcodes)
self.fused_types.update(outfused_types)
self.invars, self.outvars = self._get_vars()
def _get_codes(self):
inarg_num, outarg_num = None, None
all_inp, all_outp = [], []
for _, inarg, outarg, ret, _ in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
if inarg_num is None:
inarg_num = len(inarg)
outarg_num = len(outp)
inp, outp = list(iter_variants(inarg, outp))[0]
all_inp.append(inp)
all_outp.append(outp)
incodes = []
for n in range(inarg_num):
codes = unique([x[n] for x in all_inp])
codes.sort()
incodes.append(''.join(codes))
outcodes = []
for n in range(outarg_num):
codes = unique([x[n] for x in all_outp])
codes.sort()
outcodes.append(''.join(codes))
return tuple(incodes), tuple(outcodes)
def _get_types(self, codes):
all_types = []
fused_types = set()
for code in codes:
if len(code) == 1:
# It's not a fused type
all_types.append((CY_TYPES[code], code))
else:
# It's a fused type
fused_type, dec = generate_fused_type(code)
fused_types.add(dec)
all_types.append((fused_type, code))
return all_types, fused_types
def _get_vars(self):
invars = ["x{}".format(n) for n in range(len(self.intypes))]
outvars = ["y{}".format(n) for n in range(len(self.outtypes))]
return invars, outvars
def _get_conditional(self, types, codes, adverb):
"""Generate an if/elif/else clause that selects a specialization of
fused types.
"""
clauses = []
seen = set()
for (typ, typcode), code in zip(types, codes):
if len(typcode) == 1:
continue
if typ not in seen:
clauses.append(f"{typ} is {underscore(CY_TYPES[code])}")
seen.add(typ)
if clauses and adverb != "else":
line = "{} {}:".format(adverb, " and ".join(clauses))
elif clauses and adverb == "else":
line = "else:"
else:
line = None
return line
def _get_incallvars(self, intypes, c):
"""Generate pure input variables to a specialization,
i.e., variables that aren't used to return a value.
"""
incallvars = []
for n, intype in enumerate(intypes):
var = self.invars[n]
if c and intype == "double complex":
var = npy_cdouble_from_double_complex(var)
incallvars.append(var)
return incallvars
def _get_outcallvars(self, outtypes, c):
"""Generate output variables to a specialization,
i.e., pointers that are used to return values.
"""
outcallvars, tmpvars, casts = [], [], []
# If there are more out variables than out types, we want the
# tail of the out variables
start = len(self.outvars) - len(outtypes)
outvars = self.outvars[start:]
for n, (var, outtype) in enumerate(zip(outvars, outtypes)):
if c and outtype == "double complex":
tmp = "tmp{}".format(n)
tmpvars.append(tmp)
outcallvars.append("&{}".format(tmp))
tmpcast = double_complex_from_npy_cdouble(tmp)
casts.append("{}[0] = {}".format(var, tmpcast))
else:
outcallvars.append("{}".format(var))
return outcallvars, tmpvars, casts
def _get_nan_decs(self):
"""Set all variables to nan for specializations of fused types for
which don't have signatures.
"""
# Set non fused-type variables to nan
tab = " "*4
fused_types, lines = [], [tab + "else:"]
seen = set()
for outvar, outtype, code in zip(self.outvars, self.outtypes,
self.outcodes):
if len(code) == 1:
line = "{}[0] = {}".format(outvar, NAN_VALUE[code])
lines.append(2*tab + line)
else:
fused_type = outtype
name, _ = fused_type
if name not in seen:
fused_types.append(fused_type)
seen.add(name)
if not fused_types:
return lines
# Set fused-type variables to nan
all_codes = tuple([codes for _unused, codes in fused_types])
codelens = [len(x) for x in all_codes]
last = numpy.prod(codelens) - 1
for m, codes in enumerate(itertools.product(*all_codes)):
fused_codes, decs = [], []
for n, fused_type in enumerate(fused_types):
code = codes[n]
fused_codes.append(underscore(CY_TYPES[code]))
for nn, outvar in enumerate(self.outvars):
if self.outtypes[nn] == fused_type:
line = "{}[0] = {}".format(outvar, NAN_VALUE[code])
decs.append(line)
if m == 0:
adverb = "if"
elif m == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(fused_types, codes, adverb)
lines.append(2*tab + cond)
lines.extend([3*tab + x for x in decs])
return lines
def _get_tmp_decs(self, all_tmpvars):
"""Generate the declarations of any necessary temporary
variables.
"""
tab = " "*4
tmpvars = list(all_tmpvars)
tmpvars.sort()
tmpdecs = [tab + "cdef npy_cdouble {}".format(tmpvar)
for tmpvar in tmpvars]
return tmpdecs
def _get_python_wrap(self):
"""Generate a Python wrapper for functions which pass their
arguments as pointers.
"""
tab = " "*4
body, callvars = [], []
for (intype, _), invar in zip(self.intypes, self.invars):
callvars.append("{} {}".format(intype, invar))
line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars))
body.append(line)
for (outtype, _), outvar in zip(self.outtypes, self.outvars):
line = "cdef {} {}".format(outtype, outvar)
body.append(tab + line)
addr_outvars = [f"&{x}" for x in self.outvars]
line = "{}({}, {})".format(self.name, ", ".join(self.invars),
", ".join(addr_outvars))
body.append(tab + line)
line = "return {}".format(", ".join(self.outvars))
body.append(tab + line)
body = "\n".join(body)
return body
def _get_common(self, signum, sig):
"""Generate code common to all the _generate_* methods."""
tab = " "*4
func_name, incodes, outcodes, retcode, header = sig
# Convert ints to longs; cf. iter_variants()
incodes = incodes.replace('i', 'l')
outcodes = outcodes.replace('i', 'l')
retcode = retcode.replace('i', 'l')
if header.endswith("h"):
c = True
else:
c = False
if header.endswith("++"):
cpp = True
else:
cpp = False
intypes = [CY_TYPES[x] for x in incodes]
outtypes = [CY_TYPES[x] for x in outcodes]
retcode = re.sub(r'\*.*', '', retcode)
if not retcode:
retcode = 'v'
rettype = CY_TYPES[retcode]
if cpp:
            # Functions from _ufuncs_cxx are exported as void*
# pointers; cast them to the correct types
func_name = "scipy.special._ufuncs_cxx._export_{}".format(func_name)
func_name = "(<{}(*)({}) nogil>{})"\
.format(rettype, ", ".join(intypes + outtypes), func_name)
else:
func_name = self.cython_func_name(func_name, specialized=True)
if signum == 0:
adverb = "if"
else:
adverb = "elif"
cond = self._get_conditional(self.intypes, incodes, adverb)
if cond:
lines = [tab + cond]
sp = 2*tab
else:
lines = []
sp = tab
return func_name, incodes, outcodes, retcode, \
intypes, outtypes, rettype, c, lines, sp
def _generate_from_return_and_no_outargs(self):
tab = " "*4
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
call = "{}({})".format(func_name, ", ".join(callvars))
if c and rettype == "double complex":
call = double_complex_from_npy_cdouble(call)
line = sp + "return {}".format(call)
body.append(line)
sig = "{}->{}".format(incodes, retcode)
specs.append(sig)
if len(specs) > 1:
# Return nan for signatures without a specialization
body.append(tab + "else:")
outtype, outcodes = self.outtypes[0]
last = len(outcodes) - 1
if len(outcodes) == 1:
line = "return {}".format(NAN_VALUE[outcodes])
body.append(2*tab + line)
else:
for n, code in enumerate(outcodes):
if n == 0:
adverb = "if"
elif n == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(self.outtypes, code, adverb)
body.append(2*tab + cond)
line = "return {}".format(NAN_VALUE[code])
body.append(3*tab + line)
# Generate the head of the function
callvars, head = [], []
for n, (intype, _) in enumerate(self.intypes):
callvars.append("{} {}".format(intype, self.invars[n]))
(outtype, _) = self.outtypes[0]
dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + '"""{}"""'.format(self.doc))
src = "\n".join(head + body)
return dec, src, specs
def _generate_from_outargs_and_no_return(self):
tab = " "*4
all_tmpvars = set()
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c)
callvars.extend(outcallvars)
all_tmpvars.update(tmpvars)
call = "{}({})".format(func_name, ", ".join(callvars))
body.append(sp + call)
body.extend([sp + x for x in casts])
if len(outcodes) == 1:
sig = "{}->{}".format(incodes, outcodes)
specs.append(sig)
else:
sig = "{}*{}->v".format(incodes, outcodes)
specs.append(sig)
if len(specs) > 1:
lines = self._get_nan_decs()
body.extend(lines)
if len(self.outvars) == 1:
line = "return {}[0]".format(self.outvars[0])
body.append(tab + line)
# Generate the head of the function
callvars, head = [], []
for invar, (intype, _) in zip(self.invars, self.intypes):
callvars.append("{} {}".format(intype, invar))
if len(self.outvars) > 1:
for outvar, (outtype, _) in zip(self.outvars, self.outtypes):
callvars.append("{} *{}".format(outtype, outvar))
if len(self.outvars) == 1:
outtype, _ = self.outtypes[0]
dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars))
else:
dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + '"""{}"""'.format(self.doc))
if len(self.outvars) == 1:
outvar = self.outvars[0]
outtype, _ = self.outtypes[0]
line = "cdef {} {}".format(outtype, outvar)
head.append(tab + line)
head.extend(self._get_tmp_decs(all_tmpvars))
src = "\n".join(head + body)
return dec, src, specs
def _generate_from_outargs_and_return(self):
tab = " "*4
all_tmpvars = set()
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c)
callvars.extend(outcallvars)
all_tmpvars.update(tmpvars)
call = "{}({})".format(func_name, ", ".join(callvars))
if c and rettype == "double complex":
call = double_complex_from_npy_cdouble(call)
call = "{}[0] = {}".format(self.outvars[0], call)
body.append(sp + call)
body.extend([sp + x for x in casts])
sig = "{}*{}->v".format(incodes, outcodes + retcode)
specs.append(sig)
if len(specs) > 1:
lines = self._get_nan_decs()
body.extend(lines)
# Generate the head of the function
callvars, head = [], []
for invar, (intype, _) in zip(self.invars, self.intypes):
callvars.append("{} {}".format(intype, invar))
for outvar, (outtype, _) in zip(self.outvars, self.outtypes):
callvars.append("{} *{}".format(outtype, outvar))
dec = "cdef void {}({}) nogil".format(self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + '"""{}"""'.format(self.doc))
head.extend(self._get_tmp_decs(all_tmpvars))
src = "\n".join(head + body)
return dec, src, specs
def generate(self):
_, _, outcodes, retcode, _ = self.signatures[0]
retcode = re.sub(r'\*.*', '', retcode)
if not retcode:
retcode = 'v'
if len(outcodes) == 0 and retcode != 'v':
dec, src, specs = self._generate_from_return_and_no_outargs()
elif len(outcodes) > 0 and retcode == 'v':
dec, src, specs = self._generate_from_outargs_and_no_return()
elif len(outcodes) > 0 and retcode != 'v':
dec, src, specs = self._generate_from_outargs_and_return()
else:
raise ValueError("Invalid signature")
if len(self.outvars) > 1:
wrap = self._get_python_wrap()
else:
wrap = None
return dec, src, specs, self.fused_types, wrap
def get_declaration(ufunc, c_name, c_proto, cy_proto, header,
proto_h_filename):
"""
Construct a Cython declaration of a function coming either from a
pxd or a header file. Do sufficient tricks to enable compile-time
type checking against the signature expected by the ufunc.
"""
defs = []
defs_h = []
var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_')
if header.endswith('.pxd'):
defs.append("from .%s cimport %s as %s" % (
header[:-4], ufunc.cython_func_name(c_name, prefix=""),
ufunc.cython_func_name(c_name)))
# check function signature at compile time
proto_name = '_proto_%s_t' % var_name
defs.append("ctypedef %s" % (cy_proto.replace('(*)', proto_name)))
defs.append("cdef %s *%s_var = &%s" % (
proto_name, proto_name, ufunc.cython_func_name(c_name, specialized=True)))
else:
# redeclare the function, so that the assumed
# signature is checked at compile time
new_name = "%s \"%s\"" % (ufunc.cython_func_name(c_name), c_name)
defs.append(f'cdef extern from r"{proto_h_filename}":')
defs.append(" cdef %s" % (cy_proto.replace('(*)', new_name)))
defs_h.append(f'#include "{header}"')
defs_h.append("%s;" % (c_proto.replace('(*)', c_name)))
return defs, defs_h, var_name
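# Illustration of the lines emitted by get_declaration for a function coming
# from a .pxd file. The names below are hypothetical, not from the real
# codebase: for c_name "my_func" in "my_header.pxd" with the Cython prototype
# "double (*)(double) nogil", ``defs`` would contain roughly
#
#     from .my_header cimport my_func as _func_my_func
#     ctypedef double _proto_my_func_t(double) nogil
#     cdef _proto_my_func_t *_proto_my_func_t_var = &_func_my_func
#
# The last two lines are what force the compile-time signature check.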
def generate_ufuncs(fn_prefix, cxx_fn_prefix, ufuncs):
filename = fn_prefix + ".pyx"
proto_h_filename = fn_prefix + '_defs.h'
cxx_proto_h_filename = cxx_fn_prefix + '_defs.h'
cxx_pyx_filename = cxx_fn_prefix + ".pyx"
cxx_pxd_filename = cxx_fn_prefix + ".pxd"
toplevel = ""
# for _ufuncs*
defs = []
defs_h = []
all_loops = {}
# for _ufuncs_cxx*
cxx_defs = []
cxx_pxd_defs = [
"from . cimport sf_error",
"cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) nogil"
]
cxx_defs_h = []
ufuncs.sort(key=lambda u: u.name)
for ufunc in ufuncs:
# generate function declaration and type checking snippets
cfuncs = ufunc.get_prototypes()
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
header = header[:-2]
# for the CXX module
item_defs, item_defs_h, var_name = get_declaration(ufunc, c_name, c_proto, cy_proto,
header, cxx_proto_h_filename)
cxx_defs.extend(item_defs)
cxx_defs_h.extend(item_defs_h)
cxx_defs.append("cdef void *_export_%s = <void*>%s" % (
var_name, ufunc.cython_func_name(c_name, specialized=True, override=False)))
cxx_pxd_defs.append("cdef void *_export_%s" % (var_name,))
# let cython grab the function pointer from the c++ shared library
ufunc.function_name_overrides[c_name] = "scipy.special._ufuncs_cxx._export_" + var_name
else:
# usual case
item_defs, item_defs_h, _ = get_declaration(ufunc, c_name, c_proto, cy_proto, header,
proto_h_filename)
defs.extend(item_defs)
defs_h.extend(item_defs_h)
# ufunc creation code snippet
t = ufunc.generate(all_loops)
toplevel += t + "\n"
# Produce output
toplevel = "\n".join(sorted(all_loops.values()) + defs + [toplevel])
# Generate an `__all__` for the module
all_ufuncs = (
[
"'{}'".format(ufunc.name)
for ufunc in ufuncs if not ufunc.name.startswith('_')
]
+ ["'geterr'", "'seterr'", "'errstate'", "'jn'"]
)
module_all = '__all__ = [{}]'.format(', '.join(all_ufuncs))
with open(filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write(UFUNCS_EXTRA_CODE)
f.write(module_all)
f.write("\n")
f.write(toplevel)
f.write(UFUNCS_EXTRA_CODE_BOTTOM)
defs_h = unique(defs_h)
with open(proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(defs_h))
f.write("\n#endif\n")
cxx_defs_h = unique(cxx_defs_h)
with open(cxx_proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(cxx_defs_h))
f.write("\n#endif\n")
with open(cxx_pyx_filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write("\n")
f.write("\n".join(cxx_defs))
f.write("\n# distutils: language = c++\n")
with open(cxx_pxd_filename, 'w') as f:
f.write("\n".join(cxx_pxd_defs))
def generate_fused_funcs(modname, ufunc_fn_prefix, fused_funcs):
pxdfile = modname + ".pxd"
pyxfile = modname + ".pyx"
proto_h_filename = ufunc_fn_prefix + '_defs.h'
sources = []
declarations = []
# Code for benchmarks
bench_aux = []
fused_types = set()
# Parameters for the tests
doc = []
defs = []
for func in fused_funcs:
if func.name.startswith("_"):
# Don't try to deal with functions that have extra layers
# of wrappers.
continue
# Get the function declaration for the .pxd and the source
# code for the .pyx
dec, src, specs, func_fused_types, wrap = func.generate()
declarations.append(dec)
sources.append(src)
if wrap:
sources.append(wrap)
fused_types.update(func_fused_types)
# Declare the specializations
cfuncs = func.get_prototypes(nptypes_for_h=True)
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
# We grab the c++ functions from the c++ module
continue
item_defs, _, _ = get_declaration(func, c_name, c_proto,
cy_proto, header,
proto_h_filename)
defs.extend(item_defs)
# Add a line to the documentation
doc.append(generate_doc(func.name, specs))
# Generate code for benchmarks
if func.name in CYTHON_SPECIAL_BENCHFUNCS:
for codes in CYTHON_SPECIAL_BENCHFUNCS[func.name]:
pybench, cybench = generate_bench(func.name, codes)
bench_aux.extend([pybench, cybench])
fused_types = list(fused_types)
fused_types.sort()
with open(pxdfile, 'w') as f:
f.write(CYTHON_SPECIAL_PXD)
f.write("\n")
f.write("\n\n".join(fused_types))
f.write("\n\n")
f.write("\n".join(declarations))
with open(pyxfile, 'w') as f:
header = CYTHON_SPECIAL_PYX
header = header.replace("FUNCLIST", "\n".join(doc))
f.write(header)
f.write("\n")
f.write("\n".join(defs))
f.write("\n\n")
f.write("\n\n".join(sources))
f.write("\n\n")
f.write("\n\n".join(bench_aux))
def generate_ufuncs_type_stubs(module_name: str, ufuncs: List[Ufunc]):
stubs, module_all = [], []
for ufunc in ufuncs:
stubs.append(f'{ufunc.name}: np.ufunc')
if not ufunc.name.startswith('_'):
module_all.append(f"'{ufunc.name}'")
# jn is an alias for jv.
module_all.append("'jn'")
stubs.append('jn: np.ufunc')
module_all.sort()
stubs.sort()
contents = STUBS.format(
ALL=',\n '.join(module_all),
STUBS='\n'.join(stubs),
)
stubs_file = f'{module_name}.pyi'
with open(stubs_file, 'w') as f:
f.write(contents)
def unique(lst):
"""
Return a list without repeated entries (first occurrence is kept),
preserving order.
"""
seen = set()
new_lst = []
for item in lst:
if item in seen:
continue
seen.add(item)
new_lst.append(item)
return new_lst
def newer(source, target):
"""
Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't. Return false if
both exist and 'target' is the same age or younger than 'source'.
"""
if not os.path.exists(source):
raise ValueError("file '%s' does not exist" % os.path.abspath(source))
if not os.path.exists(target):
return 1
mtime1 = os.stat(source)[ST_MTIME]
mtime2 = os.stat(target)[ST_MTIME]
return mtime1 > mtime2
def all_newer(src_files, dst_files):
return all(os.path.exists(dst) and newer(dst, src)
for dst in dst_files for src in src_files)
def main(outdir):
pwd = os.path.dirname(__file__)
src_files = (os.path.abspath(__file__),
os.path.abspath(os.path.join(pwd, 'functions.json')),
os.path.abspath(os.path.join(pwd, '_add_newdocs.py')))
dst_files = ('_ufuncs.pyx',
'_ufuncs_defs.h',
'_ufuncs_cxx.pyx',
'_ufuncs_cxx.pxd',
'_ufuncs_cxx_defs.h',
'_ufuncs.pyi',
'cython_special.pyx',
'cython_special.pxd')
dst_files = (os.path.join(outdir, f) for f in dst_files)
os.chdir(BASE_DIR)
if all_newer(src_files, dst_files):
print("scipy/special/_generate_pyx.py: all files up-to-date")
return
ufuncs, fused_funcs = [], []
with open('functions.json') as data:
functions = json.load(data)
for f, sig in functions.items():
ufuncs.append(Ufunc(f, sig))
fused_funcs.append(FusedFunc(f, sig))
generate_ufuncs(os.path.join(outdir, "_ufuncs"),
os.path.join(outdir, "_ufuncs_cxx"),
ufuncs)
generate_ufuncs_type_stubs(os.path.join(outdir, "_ufuncs"),
ufuncs)
generate_fused_funcs(os.path.join(outdir, "cython_special"),
os.path.join(outdir, "_ufuncs"),
fused_funcs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
args = parser.parse_args()
if not args.outdir:
#raise ValueError(f"Missing `--outdir` argument to _generate_pyx.py")
# We're dealing with a distutils build here, write in-place:
outdir_abs = os.path.abspath(os.path.dirname(__file__))
else:
outdir_abs = os.path.join(os.getcwd(), args.outdir)
main(outdir_abs)
| bsd-3-clause | bd71c018bdb541b7d6858a013fd0d59b | 33.451613 | 108 | 0.543644 | 3.561454 | false | false | false | false |
scipy/scipy | scipy/linalg/_cython_signature_generator.py | 10 | 10595 | """
A script that uses f2py to generate the signature files used to make
the Cython BLAS and LAPACK wrappers from the fortran source code for
LAPACK and the reference BLAS.
To generate the BLAS wrapper signatures call:
python _cython_signature_generator.py blas <blas_directory> <out_file>
To generate the LAPACK wrapper signatures call:
python _cython_signature_generator.py lapack <lapack_src_directory> <out_file>
This script expects to be run on the source directory for
the oldest supported version of LAPACK (currently 3.4.0).
"""
import glob
import os
from numpy.f2py import crackfortran
sig_types = {'integer': 'int',
'complex': 'c',
'double precision': 'd',
'real': 's',
'complex*16': 'z',
'double complex': 'z',
'character': 'char',
'logical': 'bint'}
def get_type(info, arg):
argtype = sig_types[info['vars'][arg]['typespec']]
if argtype == 'c' and info['vars'][arg].get('kindselector') is not None:
argtype = 'z'
return argtype
def make_signature(filename):
info = crackfortran.crackfortran(filename)[0]
name = info['name']
if info['block'] == 'subroutine':
return_type = 'void'
else:
return_type = get_type(info, name)
arglist = [' *'.join([get_type(info, arg), arg]) for arg in info['args']]
args = ', '.join(arglist)
# Eliminate strange variable naming that replaces rank with rank_bn.
args = args.replace('rank_bn', 'rank')
return '{0} {1}({2})\n'.format(return_type, name, args)
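# Illustration of the generated signature format: for the reference BLAS
# routine DDOT (DOUBLE PRECISION FUNCTION DDOT(N, DX, INCX, DY, INCY)),
# make_signature('ddot.f') is expected to produce the line
#     d ddot(int *n, d *dx, int *incx, d *dy, int *incy)
# (shown for illustration only; the exact output depends on the source tree).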
def get_sig_name(line):
return line.split('(')[0].split(' ')[-1]
def sigs_from_dir(directory, outfile, manual_wrappers=None, exclusions=None):
if directory[-1] in ['/', '\\']:
directory = directory[:-1]
files = sorted(glob.glob(directory + '/*.f*'))
if exclusions is None:
exclusions = []
if manual_wrappers is not None:
exclusions += [get_sig_name(l) for l in manual_wrappers.split('\n')]
signatures = []
for filename in files:
name = os.path.splitext(os.path.basename(filename))[0]
if name in exclusions:
continue
signatures.append(make_signature(filename))
if manual_wrappers is not None:
signatures += [l + '\n' for l in manual_wrappers.split('\n')]
signatures.sort(key=get_sig_name)
comment = ["# This file was generated by _cython_signature_generator.py.\n",
"# Do not edit this file directly.\n\n"]
with open(outfile, 'w') as f:
f.writelines(comment)
f.writelines(signatures)
# slamch and dlamch are not in the lapack src directory, but, since they
# already have Python wrappers, we'll wrap them as well.
# The other manual signatures are used because the signature generating
# functions don't work when function pointer arguments are used.
lapack_manual_wrappers = '''void cgees(char *jobvs, char *sort, cselect1 *select, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgeesx(char *jobvs, char *sort, cselect1 *select, char *sense, int *n, c *a, int *lda, int *sdim, c *w, c *vs, int *ldvs, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cgges(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, c *work, int *lwork, s *rwork, bint *bwork, int *info)
void cggesx(char *jobvsl, char *jobvsr, char *sort, cselect2 *selctg, char *sense, int *n, c *a, int *lda, c *b, int *ldb, int *sdim, c *alpha, c *beta, c *vsl, int *ldvsl, c *vsr, int *ldvsr, s *rconde, s *rcondv, c *work, int *lwork, s *rwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgees(char *jobvs, char *sort, dselect2 *select, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *work, int *lwork, bint *bwork, int *info)
void dgeesx(char *jobvs, char *sort, dselect2 *select, char *sense, int *n, d *a, int *lda, int *sdim, d *wr, d *wi, d *vs, int *ldvs, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void dgges(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *work, int *lwork, bint *bwork, int *info)
void dggesx(char *jobvsl, char *jobvsr, char *sort, dselect3 *selctg, char *sense, int *n, d *a, int *lda, d *b, int *ldb, int *sdim, d *alphar, d *alphai, d *beta, d *vsl, int *ldvsl, d *vsr, int *ldvsr, d *rconde, d *rcondv, d *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
d dlamch(char *cmach)
void ilaver(int *vers_major, int *vers_minor, int *vers_patch)
void sgees(char *jobvs, char *sort, sselect2 *select, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *work, int *lwork, bint *bwork, int *info)
void sgeesx(char *jobvs, char *sort, sselect2 *select, char *sense, int *n, s *a, int *lda, int *sdim, s *wr, s *wi, s *vs, int *ldvs, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
void sgges(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *work, int *lwork, bint *bwork, int *info)
void sggesx(char *jobvsl, char *jobvsr, char *sort, sselect3 *selctg, char *sense, int *n, s *a, int *lda, s *b, int *ldb, int *sdim, s *alphar, s *alphai, s *beta, s *vsl, int *ldvsl, s *vsr, int *ldvsr, s *rconde, s *rcondv, s *work, int *lwork, int *iwork, int *liwork, bint *bwork, int *info)
s slamch(char *cmach)
void zgees(char *jobvs, char *sort, zselect1 *select, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgeesx(char *jobvs, char *sort, zselect1 *select, char *sense, int *n, z *a, int *lda, int *sdim, z *w, z *vs, int *ldvs, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zgges(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, z *work, int *lwork, d *rwork, bint *bwork, int *info)
void zggesx(char *jobvsl, char *jobvsr, char *sort, zselect2 *selctg, char *sense, int *n, z *a, int *lda, z *b, int *ldb, int *sdim, z *alpha, z *beta, z *vsl, int *ldvsl, z *vsr, int *ldvsr, d *rconde, d *rcondv, z *work, int *lwork, d *rwork, int *iwork, int *liwork, bint *bwork, int *info)'''
# Exclude scabs1 since it isn't currently included in the scipy-specific
# ABI wrappers; xerbla is excluded because it takes a Fortran string argument.
blas_exclusions = ['scabs1', 'xerbla']
# Exclude routines with string arguments to avoid
# compatibility woes with different standards for string arguments.
lapack_exclusions = [
# Not included because people should be using the
# C standard library function instead.
# sisnan is also not currently included in the
# ABI wrappers.
'sisnan', 'dlaisnan', 'slaisnan',
# Exclude slaneg because it isn't currently included
# in the ABI wrappers
'slaneg',
# Excluded because they require Fortran string arguments.
'ilaenv', 'iparmq', 'lsamen', 'xerbla',
# Exclude XBLAS routines since they aren't included
# by default.
'cgesvxx', 'dgesvxx', 'sgesvxx', 'zgesvxx',
'cgerfsx', 'dgerfsx', 'sgerfsx', 'zgerfsx',
'cla_gerfsx_extended', 'dla_gerfsx_extended',
'sla_gerfsx_extended', 'zla_gerfsx_extended',
'cla_geamv', 'dla_geamv', 'sla_geamv', 'zla_geamv',
'dla_gercond', 'sla_gercond',
'cla_gercond_c', 'zla_gercond_c',
'cla_gercond_x', 'zla_gercond_x',
'cla_gerpvgrw', 'dla_gerpvgrw',
'sla_gerpvgrw', 'zla_gerpvgrw',
'csysvxx', 'dsysvxx', 'ssysvxx', 'zsysvxx',
'csyrfsx', 'dsyrfsx', 'ssyrfsx', 'zsyrfsx',
'cla_syrfsx_extended', 'dla_syrfsx_extended',
'sla_syrfsx_extended', 'zla_syrfsx_extended',
'cla_syamv', 'dla_syamv', 'sla_syamv', 'zla_syamv',
'dla_syrcond', 'sla_syrcond',
'cla_syrcond_c', 'zla_syrcond_c',
'cla_syrcond_x', 'zla_syrcond_x',
'cla_syrpvgrw', 'dla_syrpvgrw',
'sla_syrpvgrw', 'zla_syrpvgrw',
'cposvxx', 'dposvxx', 'sposvxx', 'zposvxx',
'cporfsx', 'dporfsx', 'sporfsx', 'zporfsx',
'cla_porfsx_extended', 'dla_porfsx_extended',
'sla_porfsx_extended', 'zla_porfsx_extended',
'dla_porcond', 'sla_porcond',
'cla_porcond_c', 'zla_porcond_c',
'cla_porcond_x', 'zla_porcond_x',
'cla_porpvgrw', 'dla_porpvgrw',
'sla_porpvgrw', 'zla_porpvgrw',
'cgbsvxx', 'dgbsvxx', 'sgbsvxx', 'zgbsvxx',
'cgbrfsx', 'dgbrfsx', 'sgbrfsx', 'zgbrfsx',
'cla_gbrfsx_extended', 'dla_gbrfsx_extended',
'sla_gbrfsx_extended', 'zla_gbrfsx_extended',
'cla_gbamv', 'dla_gbamv', 'sla_gbamv', 'zla_gbamv',
'dla_gbrcond', 'sla_gbrcond',
'cla_gbrcond_c', 'zla_gbrcond_c',
'cla_gbrcond_x', 'zla_gbrcond_x',
'cla_gbrpvgrw', 'dla_gbrpvgrw',
'sla_gbrpvgrw', 'zla_gbrpvgrw',
'chesvxx', 'zhesvxx',
'cherfsx', 'zherfsx',
'cla_herfsx_extended', 'zla_herfsx_extended',
'cla_heamv', 'zla_heamv',
'cla_hercond_c', 'zla_hercond_c',
'cla_hercond_x', 'zla_hercond_x',
'cla_herpvgrw', 'zla_herpvgrw',
'sla_lin_berr', 'cla_lin_berr',
'dla_lin_berr', 'zla_lin_berr',
'clarscl2', 'dlarscl2', 'slarscl2', 'zlarscl2',
'clascl2', 'dlascl2', 'slascl2', 'zlascl2',
'cla_wwaddw', 'dla_wwaddw', 'sla_wwaddw', 'zla_wwaddw',
]
if __name__ == '__main__':
from sys import argv
libname, src_dir, outfile = argv[1:]
if libname.lower() == 'blas':
sigs_from_dir(src_dir, outfile, exclusions=blas_exclusions)
elif libname.lower() == 'lapack':
sigs_from_dir(src_dir, outfile, manual_wrappers=lapack_manual_wrappers,
exclusions=lapack_exclusions)
| bsd-3-clause | b1d65a17ed835c7849f95fd34c441e7e | 56.27027 | 297 | 0.59849 | 2.719456 | false | false | false | false |
scipy/scipy | scipy/special/_spfun_stats.py | 8 | 3806 | # Last Change: Sat Mar 21 02:00 PM 2009 J
# Copyright (c) 2001, 2002 Enthought, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Some more special functions which may be useful for multivariate statistical
analysis."""
import numpy as np
from scipy.special import gammaln as loggam
__all__ = ['multigammaln']
def multigammaln(a, d):
r"""Returns the log of multivariate gamma, also sometimes called the
generalized gamma.
Parameters
----------
a : ndarray
The multivariate gamma is computed for each item of `a`.
d : int
The dimension of the space of integration.
Returns
-------
res : ndarray
The values of the log multivariate gamma at the given points `a`.
Notes
-----
The formal definition of the multivariate gamma of dimension d for a real
`a` is
.. math::
\Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA
with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of
all the positive definite matrices of dimension `d`. Note that `a` is a
    scalar: only the integrand is multivariate, the argument is not (the
    function is defined over a subset of the real numbers).
This can be proven to be equal to the much friendlier equation
.. math::
\Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2).
References
----------
R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
probability and mathematical statistics).
Examples
--------
>>> import numpy as np
>>> from scipy.special import multigammaln, gammaln
>>> a = 23.5
>>> d = 10
>>> multigammaln(a, d)
454.1488605074416
Verify that the result agrees with the logarithm of the equation
shown above:
>>> d*(d-1)/4*np.log(np.pi) + gammaln(a - 0.5*np.arange(0, d)).sum()
454.1488605074416
"""
a = np.asarray(a)
if not np.isscalar(d) or (np.floor(d) != d):
raise ValueError("d should be a positive integer (dimension)")
if np.any(a <= 0.5 * (d - 1)):
raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met"
% (a, 0.5 * (d-1)))
res = (d * (d-1) * 0.25) * np.log(np.pi)
res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0)
return res
| bsd-3-clause | 58b0d8bb01637337f179ea128b511163 | 34.570093 | 79 | 0.665791 | 3.738703 | false | false | false | false |
scipy/scipy | scipy/signal/_max_len_seq.py | 8 | 5062 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
https://web.archive.org/web/20181001062252/http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
taps_dtype = np.int32 if np.intp().itemsize == 4 else np.int64
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], taps_dtype)
else:
taps = np.unique(np.array(taps, taps_dtype))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.array(taps) # needed for Cython and Pythran
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because NumPy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-D array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
| bsd-3-clause | dd5883d12b82548d55f2b271ebd4de2f | 35.417266 | 154 | 0.571711 | 3.236573 | false | false | false | false |
scipy/scipy | scipy/special/_spherical_bessel.py | 1 | 10217 | from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in,
_spherical_kn, _spherical_jn_d, _spherical_yn_d,
_spherical_in_d, _spherical_kn_d)
def spherical_jn(n, z, derivative=False):
r"""Spherical Bessel function of the first kind or its derivative.
Defined as [1]_,
.. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z),
where :math:`J_n` is the Bessel function of the first kind.
Parameters
----------
n : int, array_like
Order of the Bessel function (n >= 0).
z : complex or float, array_like
Argument of the Bessel function.
derivative : bool, optional
If True, the value of the derivative (rather than the function
itself) is returned.
Returns
-------
jn : ndarray
Notes
-----
For real arguments greater than the order, the function is computed
using the ascending recurrence [2]_. For small real or complex
arguments, the definitional relation to the cylindrical Bessel function
of the first kind is used.
The derivative is computed using the relations [3]_,
.. math::
j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z).
j_0'(z) = -j_1(z)
.. versionadded:: 0.18.0
References
----------
.. [1] https://dlmf.nist.gov/10.47.E3
.. [2] https://dlmf.nist.gov/10.51.E1
.. [3] https://dlmf.nist.gov/10.51.E2
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The spherical Bessel functions of the first kind :math:`j_n` accept
both real and complex second argument. They can return a complex type:
>>> from scipy.special import spherical_jn
>>> spherical_jn(0, 3+5j)
(-9.878987731663194-8.021894345786002j)
>>> type(spherical_jn(0, 3+5j))
<class 'numpy.complex128'>
We can verify the relation for the derivative from the Notes
for :math:`n=3` in the interval :math:`[1, 2]`:
>>> import numpy as np
>>> x = np.arange(1.0, 2.0, 0.01)
>>> np.allclose(spherical_jn(3, x, True),
... spherical_jn(2, x) - 4/x * spherical_jn(3, x))
True
The first few :math:`j_n` with real argument:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(0.0, 10.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-0.5, 1.5)
>>> ax.set_title(r'Spherical Bessel functions $j_n$')
>>> for n in np.arange(0, 4):
... ax.plot(x, spherical_jn(n, x), label=rf'$j_{n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if derivative:
return _spherical_jn_d(n, z)
else:
return _spherical_jn(n, z)
def spherical_yn(n, z, derivative=False):
r"""Spherical Bessel function of the second kind or its derivative.
Defined as [1]_,
.. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z),
where :math:`Y_n` is the Bessel function of the second kind.
Parameters
----------
n : int, array_like
Order of the Bessel function (n >= 0).
z : complex or float, array_like
Argument of the Bessel function.
derivative : bool, optional
If True, the value of the derivative (rather than the function
itself) is returned.
Returns
-------
yn : ndarray
Notes
-----
For real arguments, the function is computed using the ascending
recurrence [2]_. For complex arguments, the definitional relation to
the cylindrical Bessel function of the second kind is used.
The derivative is computed using the relations [3]_,
.. math::
y_n' = y_{n-1} - \frac{n + 1}{z} y_n.
y_0' = -y_1
.. versionadded:: 0.18.0
References
----------
.. [1] https://dlmf.nist.gov/10.47.E4
.. [2] https://dlmf.nist.gov/10.51.E1
.. [3] https://dlmf.nist.gov/10.51.E2
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The spherical Bessel functions of the second kind :math:`y_n` accept
both real and complex second argument. They can return a complex type:
>>> from scipy.special import spherical_yn
>>> spherical_yn(0, 3+5j)
(8.022343088587197-9.880052589376795j)
>>> type(spherical_yn(0, 3+5j))
<class 'numpy.complex128'>
We can verify the relation for the derivative from the Notes
for :math:`n=3` in the interval :math:`[1, 2]`:
>>> import numpy as np
>>> x = np.arange(1.0, 2.0, 0.01)
>>> np.allclose(spherical_yn(3, x, True),
... spherical_yn(2, x) - 4/x * spherical_yn(3, x))
True
The first few :math:`y_n` with real argument:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(0.0, 10.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-2.0, 1.0)
>>> ax.set_title(r'Spherical Bessel functions $y_n$')
>>> for n in np.arange(0, 4):
... ax.plot(x, spherical_yn(n, x), label=rf'$y_{n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if derivative:
return _spherical_yn_d(n, z)
else:
return _spherical_yn(n, z)
def spherical_in(n, z, derivative=False):
r"""Modified spherical Bessel function of the first kind or its derivative.
Defined as [1]_,
.. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z),
where :math:`I_n` is the modified Bessel function of the first kind.
Parameters
----------
n : int, array_like
Order of the Bessel function (n >= 0).
z : complex or float, array_like
Argument of the Bessel function.
derivative : bool, optional
If True, the value of the derivative (rather than the function
itself) is returned.
Returns
-------
in : ndarray
Notes
-----
The function is computed using its definitional relation to the
modified cylindrical Bessel function of the first kind.
The derivative is computed using the relations [2]_,
.. math::
i_n' = i_{n-1} - \frac{n + 1}{z} i_n.
i_1' = i_0
.. versionadded:: 0.18.0
References
----------
.. [1] https://dlmf.nist.gov/10.47.E7
.. [2] https://dlmf.nist.gov/10.51.E5
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The modified spherical Bessel functions of the first kind :math:`i_n`
accept both real and complex second argument.
They can return a complex type:
>>> from scipy.special import spherical_in
>>> spherical_in(0, 3+5j)
(-1.1689867793369182-1.2697305267234222j)
>>> type(spherical_in(0, 3+5j))
<class 'numpy.complex128'>
We can verify the relation for the derivative from the Notes
for :math:`n=3` in the interval :math:`[1, 2]`:
>>> import numpy as np
>>> x = np.arange(1.0, 2.0, 0.01)
>>> np.allclose(spherical_in(3, x, True),
... spherical_in(2, x) - 4/x * spherical_in(3, x))
True
The first few :math:`i_n` with real argument:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(0.0, 6.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(-0.5, 5.0)
>>> ax.set_title(r'Modified spherical Bessel functions $i_n$')
>>> for n in np.arange(0, 4):
... ax.plot(x, spherical_in(n, x), label=rf'$i_{n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if derivative:
return _spherical_in_d(n, z)
else:
return _spherical_in(n, z)
def spherical_kn(n, z, derivative=False):
r"""Modified spherical Bessel function of the second kind or its derivative.
Defined as [1]_,
.. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z),
where :math:`K_n` is the modified Bessel function of the second kind.
Parameters
----------
n : int, array_like
Order of the Bessel function (n >= 0).
z : complex or float, array_like
Argument of the Bessel function.
derivative : bool, optional
If True, the value of the derivative (rather than the function
itself) is returned.
Returns
-------
kn : ndarray
Notes
-----
The function is computed using its definitional relation to the
modified cylindrical Bessel function of the second kind.
The derivative is computed using the relations [2]_,
.. math::
k_n' = -k_{n-1} - \frac{n + 1}{z} k_n.
k_0' = -k_1
.. versionadded:: 0.18.0
References
----------
.. [1] https://dlmf.nist.gov/10.47.E9
.. [2] https://dlmf.nist.gov/10.51.E5
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
The modified spherical Bessel functions of the second kind :math:`k_n`
accept both real and complex second argument.
They can return a complex type:
>>> from scipy.special import spherical_kn
>>> spherical_kn(0, 3+5j)
(0.012985785614001561+0.003354691603137546j)
>>> type(spherical_kn(0, 3+5j))
<class 'numpy.complex128'>
We can verify the relation for the derivative from the Notes
for :math:`n=3` in the interval :math:`[1, 2]`:
>>> import numpy as np
>>> x = np.arange(1.0, 2.0, 0.01)
>>> np.allclose(spherical_kn(3, x, True),
... - 4/x * spherical_kn(3, x) - spherical_kn(2, x))
True
The first few :math:`k_n` with real argument:
>>> import matplotlib.pyplot as plt
>>> x = np.arange(0.0, 4.0, 0.01)
>>> fig, ax = plt.subplots()
>>> ax.set_ylim(0.0, 5.0)
>>> ax.set_title(r'Modified spherical Bessel functions $k_n$')
>>> for n in np.arange(0, 4):
... ax.plot(x, spherical_kn(n, x), label=rf'$k_{n}$')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if derivative:
return _spherical_kn_d(n, z)
else:
return _spherical_kn(n, z)
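# A minimal cross-check sketch (hypothetical helper, not called anywhere
# else): for n = 0 these functions reduce to elementary closed forms,
# j_0(z) = sin(z)/z, y_0(z) = -cos(z)/z, i_0(z) = sinh(z)/z and
# k_0(z) = (pi/2) exp(-z)/z, which makes a quick sanity check possible.
def _spherical_n0_closed_form_check():
    import numpy as np
    x = np.linspace(0.5, 10.0, 25)
    assert np.allclose(spherical_jn(0, x), np.sin(x) / x)
    assert np.allclose(spherical_yn(0, x), -np.cos(x) / x)
    assert np.allclose(spherical_in(0, x), np.sinh(x) / x)
    assert np.allclose(spherical_kn(0, x), 0.5 * np.pi * np.exp(-x) / x)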
| bsd-3-clause | 0ea146f8a6664a19231be29bd0aba706 | 28.275072 | 80 | 0.585593 | 3.202821 | false | false | false | false |
scipy/scipy | scipy/ndimage/_interpolation.py | 8 | 35437 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import warnings
import numpy
from numpy.core.multiarray import normalize_axis_index
from scipy import special
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docfiller
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
mode='mirror'):
"""
Calculate a 1-D spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
%(input)s
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is ``numpy.float64``.
%(mode_interp_mirror)s
Returns
-------
spline_filter1d : ndarray
The filtered input.
Notes
-----
All of the interpolation functions in `ndimage` do spline interpolation of
the input image. If using B-splines of `order > 1`, the input image
values have to be converted to B-spline coefficients first, which is
done by applying this 1-D filter sequentially along all
axes of the input. All functions that require B-spline coefficients
will automatically filter their inputs, a behavior controllable with
the `prefilter` keyword argument. For functions that accept a `mode`
parameter, the result will only be correct if it matches the `mode`
used when filtering.
For complex-valued `input`, this function processes the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
See Also
--------
spline_filter : Multidimensional spline filter.
Examples
--------
We can filter an image using 1-D spline along the given axis:
>>> from scipy.ndimage import spline_filter1d
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
>>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
>>> f, ax = plt.subplots(1, 3, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter_axis_0, "spline filter (axis=0)"],
... [sp_filter_axis_1, "spline filter (axis=1)"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if complex_output:
spline_filter1d(input.real, order, axis, output.real, mode)
spline_filter1d(input.imag, order, axis, output.imag, mode)
return output
if order in [0, 1]:
output[...] = numpy.array(input)
else:
mode = _ni_support._extend_mode_to_code(mode)
axis = normalize_axis_index(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output, mode)
return output
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
"""
Multidimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d : Calculate a 1-D spline filter along the given axis.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
For complex-valued `input`, this function processes the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
    We can filter an image using multidimensional splines:
>>> from scipy.ndimage import spline_filter
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, order=3)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if complex_output:
spline_filter(input.real, order, output.real, mode)
spline_filter(input.imag, order, output.imag, mode)
return output
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output, mode=mode)
input = output
else:
output[...] = input[...]
return output
def _prepad_for_spline_filter(input, mode, cval):
if mode in ['nearest', 'grid-constant']:
npad = 12
if mode == 'grid-constant':
padded = numpy.pad(input, npad, mode='constant',
constant_values=cval)
elif mode == 'nearest':
padded = numpy.pad(input, npad, mode='edge')
else:
# other modes have exact boundary conditions implemented so
# no prepadding is needed
npad = 0
padded = input
return padded, npad
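# A small sketch of the padding behaviour (hypothetical helper): modes
# without an exact boundary implementation get 12 samples of padding per
# edge before spline filtering; the other modes are passed through unchanged.
def _prepad_demo():
    img = numpy.arange(25.0).reshape(5, 5)
    padded, npad = _prepad_for_spline_filter(img, 'nearest', 0.0)
    assert npad == 12 and padded.shape == (29, 29)
    padded, npad = _prepad_for_spline_filter(img, 'mirror', 0.0)
    assert npad == 0 and padded.shape == (5, 5)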
@docfiller
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
%(input)s
mapping : {callable, scipy.LowLevelCallable}
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
output : ndarray
The filtered input.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Notes
-----
    This function also accepts low-level callback functions with one of
    the following signatures, wrapped in `scipy.LowLevelCallable`:
.. code:: c
int mapping(npy_intp *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
int mapping(intptr_t *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
The calling function iterates over the elements of the output array,
calling the callback function at each element. The coordinates of the
current output element are passed through ``output_coordinates``. The
callback function must return the coordinates at which the input must
be interpolated in ``input_coordinates``. The rank of the input and
output arrays are given by ``input_rank`` and ``output_rank``
respectively. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the Python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
For complex-valued `input`, this function transforms the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> import numpy as np
>>> from scipy.ndimage import geometric_transform
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
>>> b = [1, 2, 3, 4, 5]
>>> def shift_func(output_coords):
... return (output_coords[0] - 3,)
...
>>> geometric_transform(b, shift_func, mode='constant')
array([0, 0, 0, 1, 2])
>>> geometric_transform(b, shift_func, mode='nearest')
array([1, 1, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='reflect')
array([3, 2, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='wrap')
array([2, 3, 4, 1, 2])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
kwargs = dict(order=order, mode=mode, prefilter=prefilter,
output_shape=output_shape,
extra_arguments=extra_arguments,
extra_keywords=extra_keywords)
geometric_transform(input.real, mapping, output=output.real,
cval=numpy.real(cval), **kwargs)
geometric_transform(input.imag, mapping, output=output.imag,
cval=numpy.imag(cval), **kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, npad, extra_arguments,
extra_keywords)
return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
%(input)s
coordinates : array_like
The coordinates at which `input` is evaluated.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Notes
-----
For complex-valued `input`, this function maps the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> from scipy import ndimage
>>> import numpy as np
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
kwargs = dict(order=order, mode=mode, prefilter=prefilter)
map_coordinates(input.real, coordinates, output=output.real,
cval=numpy.real(cval), **kwargs)
map_coordinates(input.imag, coordinates, output=output.imag,
cval=numpy.imag(cval), **kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, npad, None, None)
return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``.
This does 'pull' (or 'backward') resampling, transforming the output space
to the input to locate data. Affine transformations are often described in
the 'push' (or 'forward') direction, transforming input to output. If you
have a matrix for the 'push' transformation, use its inverse
(:func:`numpy.linalg.inv`) in this function.
Parameters
----------
%(input)s
matrix : ndarray
The inverse coordinate transformation matrix, mapping output
coordinates to input coordinates. If ``ndim`` is the number of
dimensions of ``input``, the given matrix must have one of the
following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2-D transformation matrix is
diagonal, with the diagonal specified by the given value. A more
efficient algorithm is then used that exploits the separability
of the problem.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates [1]_. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
and may be omitted.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
affine_transform : ndarray
The transformed input.
Notes
-----
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a 1-D or a
2-D array. If a 1-D array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position
``matrix * (o + offset)``.
For complex-valued `input`, this function transforms the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
References
----------
.. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if output_shape is None:
if isinstance(output, numpy.ndarray):
output_shape = output.shape
else:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
kwargs = dict(offset=offset, output_shape=output_shape, order=order,
mode=mode, prefilter=prefilter)
affine_transform(input.real, matrix, output=output.real,
cval=numpy.real(cval), **kwargs)
affine_transform(input.imag, matrix, output=output.imag,
cval=numpy.imag(cval), **kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
(matrix.shape[0] in [input.ndim, input.ndim + 1])):
if matrix.shape[0] == input.ndim + 1:
exptd = [0] * input.ndim + [1]
if not numpy.all(matrix[input.ndim] == exptd):
msg = ('Expected homogeneous transformation matrix with '
'shape %s for image shape %s, but bottom row was '
'not equal to %s' % (matrix.shape, input.shape, exptd))
raise ValueError(msg)
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:input.ndim, input.ndim]
matrix = matrix[:input.ndim, :input.ndim]
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behavior of affine_transform with a 1-D "
"array supplied for the matrix parameter has changed in "
"SciPy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval, npad, False)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, npad, None,
None)
return output
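# A minimal usage sketch (hypothetical helper): a homogeneous
# (ndim + 1, ndim + 1) matrix carries the translation in its last column,
# so no separate ``offset`` is needed. This is the 'pull' convention,
# mapping output coordinates to input coordinates.
def _affine_transform_demo():
    img = numpy.zeros((5, 5))
    img[2, 2] = 1.0
    matrix = numpy.array([[1.0, 0.0, -1.0],
                          [0.0, 1.0, -1.0],
                          [0.0, 0.0, 1.0]])
    out = affine_transform(img, matrix, order=0)
    # The bright pixel moves from (2, 2) to (3, 3).
    assert out[3, 3] == 1.0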
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
%(input)s
shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
shift : ndarray
The shifted input.
Notes
-----
For complex-valued `input`, this function shifts the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if complex_output:
# import under different name to avoid confusion with shift parameter
from scipy.ndimage._interpolation import shift as _shift
kwargs = dict(order=order, mode=mode, prefilter=prefilter)
_shift(input.real, shift, output=output.real, cval=numpy.real(cval),
**kwargs)
_shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval),
**kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
npad, False)
return output
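# A minimal usage sketch (hypothetical helper): an integer shift with
# order=0 simply relocates values, and the vacated entries are filled
# according to ``mode`` (here the default constant 0).
def _shift_demo():
    a = numpy.array([0.0, 1.0, 2.0, 3.0, 4.0])
    out = shift(a, 2, order=0)
    assert numpy.array_equal(out, [0.0, 0.0, 0.0, 1.0, 2.0])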
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True, *, grid_mode=False):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
%(input)s
zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
grid_mode : bool, optional
If False, the distance from the pixel centers is zoomed. Otherwise, the
distance including the full pixel extent is used. For example, a 1d
signal of length 5 is considered to have length 4 when `grid_mode` is
False, but length 5 when `grid_mode` is True. See the following
visual illustration:
.. code-block:: text
| pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
|<-------------------------------------->|
vs.
|<----------------------------------------------->|
The starting point of the arrow in the diagram above corresponds to
coordinate location 0 in each mode.
Returns
-------
zoom : ndarray
The zoomed input.
Notes
-----
For complex-valued `input`, this function zooms the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> from scipy import ndimage, datasets
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = datasets.ascent()
>>> result = ndimage.zoom(ascent, 3.0)
>>> ax1.imshow(ascent, vmin=0, vmax=255)
>>> ax2.imshow(result, vmin=0, vmax=255)
>>> plt.show()
>>> print(ascent.shape)
(512, 512)
>>> print(result.shape)
(1536, 1536)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
# import under different name to avoid confusion with zoom parameter
from scipy.ndimage._interpolation import zoom as _zoom
kwargs = dict(order=order, mode=mode, prefilter=prefilter)
_zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
**kwargs)
_zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
**kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
if grid_mode:
# warn about modes that may have surprising behavior
suggest_mode = None
if mode == 'constant':
suggest_mode = 'grid-constant'
elif mode == 'wrap':
suggest_mode = 'grid-wrap'
if suggest_mode is not None:
warnings.warn(
("It is recommended to use mode = {} instead of {} when "
"grid_mode is True.").format(suggest_mode, mode)
)
mode = _ni_support._extend_mode_to_code(mode)
zoom_div = numpy.array(output_shape)
zoom_nominator = numpy.array(input.shape)
if not grid_mode:
zoom_div -= 1
zoom_nominator -= 1
# Zooming to infinite values is unpredictable, so just choose
# zoom factor 1 instead
zoom = numpy.divide(zoom_nominator, zoom_div,
out=numpy.ones_like(input.shape, dtype=numpy.float64),
where=zoom_div != 0)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
grid_mode)
return output
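# A minimal sketch of the ``grid_mode`` distinction (hypothetical helper):
# with the default grid_mode=False the zoom factor is based on the spacing
# between pixel centres, (len - 1)/(out_len - 1), so the first output sample
# stays aligned with the first input pixel; with grid_mode=True the full
# pixel extent, len/out_len, is used instead.
def _zoom_grid_mode_demo():
    a = numpy.arange(5.0)
    out_centers = zoom(a, 2, order=1)
    out_grid = zoom(a, 2, order=1, grid_mode=True, mode='grid-constant')
    assert out_centers.shape == out_grid.shape == (10,)
    assert out_centers[0] == a[0]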
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
%(input)s
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
rotate : ndarray
The rotated input.
Notes
-----
For complex-valued `input`, this function rotates the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> from scipy import ndimage, datasets
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(10, 3))
>>> ax1, ax2, ax3 = fig.subplots(1, 3)
>>> img = datasets.ascent()
>>> img_45 = ndimage.rotate(img, 45, reshape=False)
>>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
>>> ax1.imshow(img, cmap='gray')
>>> ax1.set_axis_off()
>>> ax2.imshow(img_45, cmap='gray')
>>> ax2.set_axis_off()
>>> ax3.imshow(full_img_45, cmap='gray')
>>> ax3.set_axis_off()
>>> fig.set_layout_engine('tight')
>>> plt.show()
>>> print(img.shape)
(512, 512)
>>> print(img_45.shape)
(512, 512)
>>> print(full_img_45.shape)
(724, 724)
"""
input_arr = numpy.asarray(input)
ndim = input_arr.ndim
if ndim < 2:
raise ValueError('input array should be at least 2D')
axes = list(axes)
if len(axes) != 2:
raise ValueError('axes should contain exactly two values')
if not all([float(ax).is_integer() for ax in axes]):
raise ValueError('axes should contain only integer values')
if axes[0] < 0:
axes[0] += ndim
if axes[1] < 0:
axes[1] += ndim
if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
raise ValueError('invalid rotation plane specified')
axes.sort()
c, s = special.cosdg(angle), special.sindg(angle)
rot_matrix = numpy.array([[c, s],
[-s, c]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy],
[0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
offset = in_center - out_center
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
complex_output = numpy.iscomplexobj(input_arr)
output = _ni_support._get_output(output, input_arr, shape=output_shape,
complex_output=complex_output)
if ndim <= 2:
affine_transform(input_arr, rot_matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
# If ndim > 2, the rotation is applied over all the planes
# parallel to axes
planes_coord = itertools.product(
*[[slice(None)] if ax in axes else range(img_shape[ax])
for ax in range(ndim)])
out_plane_shape = tuple(out_plane_shape)
for coordinates in planes_coord:
ia = input_arr[coordinates]
oa = output[coordinates]
affine_transform(ia, rot_matrix, offset, out_plane_shape,
oa, order, mode, cval, prefilter)
return output
| bsd-3-clause | df4194e07d6f4c28ee74c5d6aed2b61c | 35.913542 | 79 | 0.612354 | 4.061547 | false | false | false | false |
scipy/scipy | scipy/_lib/_finite_differences.py | 9 | 4172 | from numpy import arange, newaxis, hstack, prod, array
def _central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Returns
-------
w : ndarray
Weights for an Np-point central derivative. Its size is `Np`.
Notes
-----
Can be inaccurate for a large number of points.
Examples
--------
We can calculate a derivative value of a function.
>>> def f(x):
... return 2 * x**2 + 3
>>> x = 3.0 # derivative point
>>> h = 0.1 # differential step
>>> Np = 3 # point number for central derivative
>>> weights = _central_diff_weights(Np) # weights for first derivative
>>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
>>> sum(w * v for (w, v) in zip(weights, vals))/h
11.79999999999998
This value is close to the analytical solution:
f'(x) = 4x, so f'(3) = 12
References
----------
.. [1] https://en.wikipedia.org/wiki/Finite_difference
"""
if Np < ndiv + 1:
raise ValueError(
"Number of points must be at least the derivative order + 1."
)
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho, ho + 1.0)
x = x[:, newaxis]
X = x**0.0
for k in range(1, Np):
X = hstack([X, x**k])
w = prod(arange(1, ndiv + 1), axis=0) * linalg.inv(X)[ndiv]
return w
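# A small consistency sketch (hypothetical helper): the computed weights
# reproduce the familiar hard-coded central stencils used in
# ``_derivative`` below.
def _central_diff_weights_demo():
    import numpy as np
    assert np.allclose(_central_diff_weights(3), [-0.5, 0.0, 0.5])
    assert np.allclose(_central_diff_weights(5),
                       np.array([1, -8, 0, 8, -1]) / 12.0)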
def _derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the nth derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the nth derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which the nth derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too much can result in round-off error.
Examples
--------
>>> def f(x):
... return x**3 + x**2
>>> _derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError(
"'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1."
)
if order % 2 == 0:
raise ValueError(
"'order' (the number of points used to compute the derivative) "
"must be odd."
)
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1, 0, 1]) / 2.0
elif order == 5:
weights = array([1, -8, 0, 8, -1]) / 12.0
elif order == 7:
weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
elif order == 9:
weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
else:
weights = _central_diff_weights(order, 1)
elif n == 2:
if order == 3:
weights = array([1, -2.0, 1])
elif order == 5:
weights = array([-1, 16, -30, 16, -1]) / 12.0
elif order == 7:
weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
elif order == 9:
weights = (
array([-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9])
/ 5040.0
)
else:
weights = _central_diff_weights(order, 2)
else:
weights = _central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k] * func(x0 + (k - ho) * dx, *args)
return val / prod((dx,) * n, axis=0)
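# A small usage sketch (hypothetical helper): the same routine handles
# higher derivative orders, e.g. n=2 for f(x) = x**3 + x**2 gives
# f''(1) = 6*1 + 2 = 8.
def _derivative_second_order_demo():
    def f(x):
        return x**3 + x**2
    val = _derivative(f, 1.0, dx=1e-3, n=2, order=5)
    assert abs(val - 8.0) < 1e-5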
| bsd-3-clause | 6a6224780f18433f5600e11254eadbf4 | 27.772414 | 78 | 0.52325 | 3.476667 | false | false | false | false |
scipy/scipy | scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py | 8 | 16524 | """ Test functions for the sparse.linalg._eigen.lobpcg module
"""
import itertools
import platform
import sys
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
import pytest
from numpy import ones, r_, diag
from scipy.linalg import eig, eigh, toeplitz, orth
from scipy.sparse import spdiags, diags, eye, csr_matrix
from scipy.sparse.linalg import eigs, LinearOperator
from scipy.sparse.linalg._eigen.lobpcg import lobpcg
_IS_32BIT = (sys.maxsize < 2**32)
def ElasticRod(n):
"""Build the matrices for the generalized eigenvalue problem of the
fixed-free elastic rod vibration model.
"""
L = 1.0
le = L/n
rho = 7.85e3
S = 1.e-4
E = 2.1e11
mass = rho*S*le/6.
k = E*S/le
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
return A, B
def MikotaPair(n):
"""Build a pair of full diagonal matrices for the generalized eigenvalue
problem. The Mikota pair acts as a nice test since the eigenvalues are the
squares of the integers n, n=1,2,...
"""
x = np.arange(1, n+1)
B = diag(1./x)
y = np.arange(n-1, 0, -1)
z = np.arange(2*n-1, 0, -2)
A = diag(z)-diag(y, -1)-diag(y, 1)
return A, B
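# A small reference sketch (hypothetical helper, not used by the tests
# below): the Mikota pair's generalized eigenvalues are exactly the squares
# 1, 4, 9, ..., which can be confirmed directly with eigh.
def _mikota_reference_check(n=10):
    A, B = MikotaPair(n)
    w = eigh(A, B, eigvals_only=True)
    assert np.allclose(np.sort(w), np.arange(1, n + 1) ** 2)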
def compare_solutions(A, B, m):
"""Check eig vs. lobpcg consistency.
"""
n = A.shape[0]
rnd = np.random.RandomState(0)
V = rnd.random((n, m))
X = orth(V)
eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
eigvals.sort()
w, _ = eig(A, b=B)
w.sort()
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
def test_Small():
A, B = ElasticRod(10)
with pytest.warns(UserWarning, match="The problem size"):
compare_solutions(A, B, 10)
A, B = MikotaPair(10)
with pytest.warns(UserWarning, match="The problem size"):
compare_solutions(A, B, 10)
def test_ElasticRod():
A, B = ElasticRod(20)
with pytest.warns(UserWarning, match="Exited at iteration"):
compare_solutions(A, B, 2)
def test_MikotaPair():
A, B = MikotaPair(20)
compare_solutions(A, B, 2)
@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
def test_nonhermitian_warning(capsys):
"""Check the warning of a Ritz matrix being not Hermitian
by feeding a non-Hermitian input matrix.
    Also check stdout (since verbosityLevel=1) and that stderr stays empty.
"""
n = 10
X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
A = np.arange(n * n).reshape(n, n).astype(np.float32)
with pytest.warns(UserWarning, match="Matrix gramA"):
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
out, err = capsys.readouterr() # Capture output
assert out.startswith("Solving standard eigenvalue") # Test stdout
assert err == '' # Test empty stderr
    # Make the matrix symmetric and the UserWarning disappears.
A += A.T
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
out, err = capsys.readouterr() # Capture output
assert out.startswith("Solving standard eigenvalue") # Test stdout
assert err == '' # Test empty stderr
def test_regression():
"""Check the eigenvalue of the identity matrix is one.
"""
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
n = 10
X = np.ones((n, 1))
A = np.identity(n)
w, _ = lobpcg(A, X)
assert_allclose(w, [1])
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize('n, m, m_excluded', [(100, 4, 3), (4, 2, 0)])
def test_diagonal(n, m, m_excluded):
"""Test ``m - m_excluded`` eigenvalues and eigenvectors of
diagonal matrices of the size ``n`` varying matrix formats:
dense array, spare matrix, and ``LinearOperator`` for both
matrixes in the generalized eigenvalue problem ``Av = cBv``
and for the preconditioner.
"""
rnd = np.random.RandomState(0)
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A to be the diagonal matrix whose entries are 1..n
# and where B is chosen to be the identity matrix.
vals = np.arange(1, n+1, dtype=float)
A_s = diags([vals], [0], (n, n))
A_a = A_s.toarray()
def A_f(x):
return A_s @ x
A_lo = LinearOperator(matvec=A_f,
matmat=A_f,
shape=(n, n), dtype=float)
B_a = eye(n)
B_s = csr_matrix(B_a)
def B_f(x):
return B_a @ x
B_lo = LinearOperator(matvec=B_f,
matmat=B_f,
shape=(n, n), dtype=float)
# Let the preconditioner M be the inverse of A.
M_s = diags([1./vals], [0], (n, n))
M_a = M_s.toarray()
def M_f(x):
return M_s @ x
M_lo = LinearOperator(matvec=M_f,
matmat=M_f,
shape=(n, n), dtype=float)
# Pick random initial vectors.
X = rnd.normal(size=(n, m))
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors.
if m_excluded > 0:
Y = np.eye(n, m_excluded)
else:
Y = None
for A in [A_a, A_s, A_lo]:
for B in [B_a, B_s, B_lo]:
for M in [M_a, M_s, M_lo]:
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
maxiter=40, largest=False)
assert_allclose(eigvals, np.arange(1+m_excluded,
1+m_excluded+m))
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
"""Check if the eigenvalue residual is small.
"""
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
"""Check the Fiedler vector computation.
"""
# This is not necessarily the recommended way to find the Fiedler vector.
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)
D = np.diag(A.sum(axis=1))
L = D - A
# Compute the full eigendecomposition using tricks, e.g.
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
tmp = np.pi * np.arange(n) / n
analytic_w = 2 * (1 - np.cos(tmp))
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
_check_eigen(L, analytic_w, analytic_V)
# Compute the full eigendecomposition using eigh.
eigh_w, eigh_V = eigh(L)
_check_eigen(L, eigh_w, eigh_V)
# Check that the first eigenvalue is near zero and that the rest agree.
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
assert_allclose(eigh_w[1:], analytic_w[1:])
# Check small lobpcg eigenvalues.
X = analytic_V[:, :p]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
# Check large lobpcg eigenvalues.
X = analytic_V[:, -p:]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
# Look for the Fiedler vector using good but not exactly correct guesses.
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
X = np.vstack((np.ones(n), fiedler_guess)).T
lobpcg_w, _ = lobpcg(L, X, largest=False)
# Mathematically, the smaller eigenvalue should be zero
# and the larger should be the algebraic connectivity.
lobpcg_w = np.sort(lobpcg_w)
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
def test_fiedler_small_8():
"""Check the dense workaround path for small matrices.
"""
# This triggers the dense path because 8 < 2*5.
with pytest.warns(UserWarning, match="The problem size"):
_check_fiedler(8, 2)
def test_fiedler_large_12():
"""Check the dense workaround path avoided for non-small matrices.
"""
# This does not trigger the dense path, because 2*5 <= 12.
_check_fiedler(12, 2)
def test_failure_to_run_iterations():
"""Check that the code exists gracefully without breaking. Issue #10974.
"""
rnd = np.random.RandomState(0)
X = rnd.standard_normal((100, 10))
A = X @ X.T
Q = rnd.standard_normal((X.shape[0], 4))
with pytest.warns(UserWarning, match="Exited at iteration"):
eigenvalues, _ = lobpcg(A, Q, maxiter=20)
assert(np.max(eigenvalues) > 0)
@pytest.mark.filterwarnings("ignore:The problem size")
def test_hermitian():
"""Check complex-value Hermitian cases.
"""
rnd = np.random.RandomState(0)
sizes = [3, 10, 50]
ks = [1, 3, 10, 50]
gens = [True, False]
for s, k, gen in itertools.product(sizes, ks, gens):
if k > s:
continue
H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
H = 10 * np.eye(s) + H + H.T.conj()
X = rnd.random((s, k))
if not gen:
B = np.eye(s)
w, v = lobpcg(H, X, maxiter=5000)
w0, _ = eigh(H)
else:
B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
B = 10 * np.eye(s) + B.dot(B.T.conj())
w, v = lobpcg(H, X, B, maxiter=5000, largest=False)
w0, _ = eigh(H, B)
for wx, vx in zip(w, v.T):
# Check eigenvector
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
/ np.linalg.norm(H.dot(vx)),
0, atol=5e-4, rtol=0)
# Compare eigenvalues
j = np.argmin(abs(w0 - wx))
assert_allclose(wx, w0[j], rtol=1e-4)
# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.filterwarnings("ignore:The problem size")
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigs_consistency(n, atol):
"""Check eigs vs. lobpcg consistency.
"""
vals = np.arange(1, n+1, dtype=np.float64)
A = spdiags(vals, 0, n, n)
rnd = np.random.RandomState(0)
X = rnd.random((n, 2))
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
vals, _ = eigs(A, k=2)
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
def test_verbosity(tmpdir):
"""Check that nonzero verbosity level code runs.
"""
rnd = np.random.RandomState(0)
X = rnd.standard_normal((10, 10))
A = X @ X.T
Q = rnd.standard_normal((X.shape[0], 1))
with pytest.warns(UserWarning, match="Exited at iteration"):
_, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
reason="tolerance violation on windows")
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_tolerance_float32():
"""Check lobpcg for attainable tolerance in float32.
"""
rnd = np.random.RandomState(0)
n = 50
m = 3
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = rnd.standard_normal((n, m))
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
def test_random_initial_float32():
"""Check lobpcg in float32 for specific initial.
"""
rnd = np.random.RandomState(0)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = rnd.random((n, m))
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2)
def test_maxit():
"""Check lobpcg if maxit=10 runs 10 iterations
if maxit=None runs 20 iterations (the default)
by checking the size of the iteration history output, which should
be the number of iterations plus 2 (initial and final values).
"""
rnd = np.random.RandomState(0)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = rnd.standard_normal((n, m))
X = X.astype(np.float32)
with pytest.warns(UserWarning, match="Exited at iteration"):
_, _, l_h = lobpcg(A, X, tol=1e-8, maxiter=10, retLambdaHistory=True)
assert_allclose(np.shape(l_h)[0], 10+2)
with pytest.warns(UserWarning, match="Exited at iteration"):
_, _, l_h = lobpcg(A, X, tol=1e-8, retLambdaHistory=True)
assert_allclose(np.shape(l_h)[0], 20+2)
@pytest.mark.slow
def test_diagonal_data_types():
"""Check lobpcg for diagonal matrices for all matrix types.
"""
rnd = np.random.RandomState(0)
n = 40
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A and B to be diagonal.
vals = np.arange(1, n + 1)
list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
sparse_formats = len(list_sparse_format)
for s_f_i, s_f in enumerate(list_sparse_format):
As64 = diags([vals * vals], [0], (n, n), format=s_f)
As32 = As64.astype(np.float32)
Af64 = As64.toarray()
Af32 = Af64.astype(np.float32)
listA = [Af64, As64, Af32, As32]
Bs64 = diags([vals], [0], (n, n), format=s_f)
Bf64 = Bs64.toarray()
listB = [Bf64, Bs64]
# Define the preconditioner function as LinearOperator.
Ms64 = diags([1./vals], [0], (n, n), format=s_f)
def Ms64precond(x):
return Ms64 @ x
Ms64precondLO = LinearOperator(matvec=Ms64precond,
matmat=Ms64precond,
shape=(n, n),
dtype=float)
Mf64 = Ms64.toarray()
def Mf64precond(x):
return Mf64 @ x
Mf64precondLO = LinearOperator(matvec=Mf64precond,
matmat=Mf64precond,
shape=(n, n),
dtype=float)
Ms32 = Ms64.astype(np.float32)
def Ms32precond(x):
return Ms32 @ x
Ms32precondLO = LinearOperator(matvec=Ms32precond,
matmat=Ms32precond,
shape=(n, n),
dtype=np.float32)
Mf32 = Ms32.toarray()
def Mf32precond(x):
return Mf32 @ x
Mf32precondLO = LinearOperator(matvec=Mf32precond,
matmat=Mf32precond,
shape=(n, n),
dtype=np.float32)
listM = [None, Ms64precondLO, Mf64precondLO,
Ms32precondLO, Mf32precondLO]
# Setup matrix of the initial approximation to the eigenvectors
# (cannot be sparse array).
Xf64 = rnd.random((n, m))
Xf32 = Xf64.astype(np.float32)
listX = [Xf64, Xf32]
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors (cannot be sparse array).
m_excluded = 3
Yf64 = np.eye(n, m_excluded, dtype=float)
Yf32 = np.eye(n, m_excluded, dtype=np.float32)
listY = [Yf64, Yf32]
tests = list(itertools.product(listA, listB, listM, listX, listY))
# This is one of the slower tests because there are >1,000 configs
# to test here, instead of checking product of all input, output types
# test each configuration for the first sparse format, and then
# for one additional sparse format. this takes 2/7=30% as long as
# testing all configurations for all sparse formats.
if s_f_i > 0:
tests = tests[s_f_i - 1::sparse_formats-1]
for A, B, M, X, Y in tests:
eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
maxiter=100, largest=False)
assert_allclose(eigvals,
np.arange(1 + m_excluded, 1 + m_excluded + m))
| bsd-3-clause | 2a2829470975a98af80baf77fbbf6f87 | 33.21118 | 80 | 0.577463 | 3.078815 | false | true | false | false |
scipy/scipy | scipy/sparse/_extract.py | 15 | 4648 | """Functions to extract parts of sparse matrices
"""
__docformat__ = "restructuredtext en"
__all__ = ['find', 'tril', 'triu']
from ._coo import coo_matrix
def find(A):
"""Return the indices and values of the nonzero elements of a matrix
Parameters
----------
A : dense or sparse matrix
Matrix whose nonzero elements are desired.
Returns
-------
(I,J,V) : tuple of arrays
I,J, and V contain the row indices, column indices, and values
of the nonzero matrix entries.
Examples
--------
>>> from scipy.sparse import csr_matrix, find
>>> A = csr_matrix([[7.0, 8.0, 0],[0, 0, 9.0]])
>>> find(A)
(array([0, 0, 1], dtype=int32), array([0, 1, 2], dtype=int32), array([ 7., 8., 9.]))
"""
A = coo_matrix(A, copy=True)
A.sum_duplicates()
# remove explicit zeros
nz_mask = A.data != 0
return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask]
def tril(A, k=0, format=None):
"""Return the lower triangular portion of a matrix in sparse format
Returns the elements on or below the k-th diagonal of the matrix A.
- k = 0 corresponds to the main diagonal
- k > 0 is above the main diagonal
- k < 0 is below the main diagonal
Parameters
----------
A : dense or sparse matrix
        Matrix whose lower triangular portion is desired.
    k : integer, optional
The top-most diagonal of the lower triangle.
format : string
Sparse format of the result, e.g. format="csr", etc.
Returns
-------
L : sparse matrix
Lower triangular portion of A in sparse format.
See Also
--------
triu : upper triangle in sparse format
Examples
--------
>>> from scipy.sparse import csr_matrix, tril
>>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
... dtype='int32')
>>> A.toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> tril(A).toarray()
array([[1, 0, 0, 0, 0],
[4, 5, 0, 0, 0],
[0, 0, 8, 0, 0]])
>>> tril(A).nnz
4
>>> tril(A, k=1).toarray()
array([[1, 2, 0, 0, 0],
[4, 5, 0, 0, 0],
[0, 0, 8, 9, 0]])
>>> tril(A, k=-1).toarray()
array([[0, 0, 0, 0, 0],
[4, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> tril(A, format='csc')
<3x5 sparse matrix of type '<class 'numpy.int32'>'
with 4 stored elements in Compressed Sparse Column format>
"""
# convert to COOrdinate format where things are easy
A = coo_matrix(A, copy=False)
mask = A.row + k >= A.col
return _masked_coo(A, mask).asformat(format)
def triu(A, k=0, format=None):
"""Return the upper triangular portion of a matrix in sparse format
Returns the elements on or above the k-th diagonal of the matrix A.
- k = 0 corresponds to the main diagonal
- k > 0 is above the main diagonal
- k < 0 is below the main diagonal
Parameters
----------
A : dense or sparse matrix
        Matrix whose upper triangular portion is desired.
    k : integer, optional
The bottom-most diagonal of the upper triangle.
format : string
Sparse format of the result, e.g. format="csr", etc.
Returns
-------
    U : sparse matrix
Upper triangular portion of A in sparse format.
See Also
--------
tril : lower triangle in sparse format
Examples
--------
>>> from scipy.sparse import csr_matrix, triu
>>> A = csr_matrix([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]],
... dtype='int32')
>>> A.toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> triu(A).toarray()
array([[1, 2, 0, 0, 3],
[0, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> triu(A).nnz
8
>>> triu(A, k=1).toarray()
array([[0, 2, 0, 0, 3],
[0, 0, 0, 6, 7],
[0, 0, 0, 9, 0]])
>>> triu(A, k=-1).toarray()
array([[1, 2, 0, 0, 3],
[4, 5, 0, 6, 7],
[0, 0, 8, 9, 0]])
>>> triu(A, format='csc')
<3x5 sparse matrix of type '<class 'numpy.int32'>'
with 8 stored elements in Compressed Sparse Column format>
"""
# convert to COOrdinate format where things are easy
A = coo_matrix(A, copy=False)
mask = A.row + k <= A.col
return _masked_coo(A, mask).asformat(format)
def _masked_coo(A, mask):
row = A.row[mask]
col = A.col[mask]
data = A.data[mask]
return coo_matrix((data, (row, col)), shape=A.shape, dtype=A.dtype)
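# The helper above is the shared core of `tril` and `triu`: build a boolean
# mask over the COO coordinates, then reassemble a COO matrix from the kept
# entries. The same pattern extends to other coordinate-based selections; for
# instance, a hypothetical k-th diagonal extractor (illustrative sketch only,
# not part of this module's API) would be:
#
#     def _diag_coo(A, k=0):
#         A = coo_matrix(A, copy=False)
#         return _masked_coo(A, A.row + k == A.col)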
| bsd-3-clause | a45a2d9584b12b32b5acbe7991287a73 | 26.502959 | 90 | 0.523666 | 3.294118 | false | false | false | false |
scipy/scipy | benchmarks/benchmarks/cutest/dfoxs.py | 1 | 2637 | # This is a python implementation of dfoxs.m,
# provided at https://github.com/POptUS/BenDFO
import numpy as np
def dfoxs(n, nprob, factor):
x = np.zeros(n)
if nprob == 1 or nprob == 2 or nprob == 3: # Linear functions.
x = np.ones(n)
elif nprob == 4: # Rosenbrock function.
x[0] = -1.2
x[1] = 1
elif nprob == 5: # Helical valley function.
x[0] = -1
elif nprob == 6: # Powell singular function.
x[0] = 3
x[1] = -1
x[2] = 0
x[3] = 1
elif nprob == 7: # Freudenstein and Roth function.
x[0] = 0.5
x[1] = -2
elif nprob == 8: # Bard function.
x[0] = 1
x[1] = 1
x[2] = 1
elif nprob == 9: # Kowalik and Osborne function.
x[0] = 0.25
x[1] = 0.39
x[2] = 0.415
x[3] = 0.39
elif nprob == 10: # Meyer function.
x[0] = 0.02
x[1] = 4000
x[2] = 250
elif nprob == 11: # Watson function.
x = 0.5 * np.ones(n)
elif nprob == 12: # Box 3-dimensional function.
x[0] = 0
x[1] = 10
x[2] = 20
elif nprob == 13: # Jennrich and Sampson function.
x[0] = 0.3
x[1] = 0.4
elif nprob == 14: # Brown and Dennis function.
x[0] = 25
x[1] = 5
x[2] = -5
x[3] = -1
elif nprob == 15: # Chebyquad function.
for k in range(n):
x[k] = (k + 1) / (n + 1)
elif nprob == 16: # Brown almost-linear function.
x = 0.5 * np.ones(n)
elif nprob == 17: # Osborne 1 function.
x[0] = 0.5
x[1] = 1.5
x[2] = 1
x[3] = 0.01
x[4] = 0.02
elif nprob == 18: # Osborne 2 function.
x[0] = 1.3
x[1] = 0.65
x[2] = 0.65
x[3] = 0.7
x[4] = 0.6
x[5] = 3
x[6] = 5
x[7] = 7
x[8] = 2
x[9] = 4.5
x[10] = 5.5
elif nprob == 19: # Bdqrtic.
x = np.ones(n)
elif nprob == 20: # Cube.
x = 0.5 * np.ones(n)
elif nprob == 21: # Mancino.
for i in range(n):
ss = 0
for j in range(n):
frac = (i + 1) / (j + 1)
ss = ss + np.sqrt(frac) * (
(np.sin(np.log(np.sqrt(frac)))) ** 5
+ (np.cos(np.log(np.sqrt(frac)))) ** 5
)
x[i] = -8.710996e-4 * ((i - 49) ** 3 + ss)
elif nprob == 22: # Heart8ls.
x = np.asarray([-0.3, -0.39, 0.3, -0.344, -1.2, 2.69, 1.59, -1.5])
else:
print(f"unrecognized function number {nprob}")
return None
return factor * x
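# Illustrative usage (values follow directly from the branches above):
# dfoxs(2, 4, 1.0) gives array([-1.2, 1.]), the classical Rosenbrock start,
# and dfoxs(3, 15, 1.0) gives array([0.25, 0.5, 0.75]), the Chebyquad start
# x[k] = (k + 1) / (n + 1).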
| bsd-3-clause | 5bef1f9c1e1e830ec3bf8dbf3b4fc9c1 | 27.053191 | 74 | 0.423967 | 2.674442 | false | false | false | false |
scipy/scipy | scipy/integrate/_ode.py | 10 | 47945 | # Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz.
# To wrap cvode to Python, one must write the extension module by
# hand. Its interface is too much 'advanced C' that using f2py
# would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
# if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
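#
# As a concrete, purely illustrative instance of the template above (the class
# name `euler` and its `nsteps` option are invented for this sketch and are
# not part of the module), a fixed-step explicit Euler integrator could look
# like:
#
#     class euler(IntegratorBase):
#
#         runner = True  # any non-None value marks the integrator as available
#
#         def __init__(self, nsteps=1000):
#             self.nsteps = nsteps
#             self.success = 1
#
#         def run(self, f, jac, y0, t0, t1, f_params, jac_params):
#             dt = (t1 - t0) / self.nsteps
#             y, t = asarray(y0, dtype=float), t0
#             for _ in range(self.nsteps):
#                 y = y + dt * asarray(f(t, y, *f_params))
#                 t += dt
#             return y, t1
#
#     if euler.runner:
#         IntegratorBase.integrator_classes.append(euler)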
__all__ = ['ode', 'complex_ode']
import re
import warnings
from numpy import asarray, array, zeros, isscalar, real, imag, vstack
from . import _vode
from . import _dop
from . import _lsoda
_dop_int_dtype = _dop.types.intvar.dtype
_vode_int_dtype = _vode.types.intvar.dtype
_lsoda_int_dtype = _lsoda.types.intvar.dtype
# ------------------------------------------------------------------------------
# User interface
# ------------------------------------------------------------------------------
class ode:
"""
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
*Note*: The first two arguments of ``f(t, y, ...)`` are in the
opposite order of the arguments in the system definition function used
by `scipy.integrate.odeint`.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Right-hand side of the differential equation. t is a scalar,
``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
`f` should return a scalar, array or list (not a tuple).
jac : callable ``jac(t, y, *jac_args)``, optional
Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
dimension of the matrix must be (lband+uband+1, len(y)).
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
This option is only considered when the user has not supplied a
Jacobian function and has not indicated (by setting either band)
that the Jacobian is banded. In this case, `with_jacobian` specifies
whether the iteration method of the ODE solver's correction step is
chord iteration with an internally generated full Jacobian or
functional iteration with no Jacobian.
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"lsoda"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
automatic method switching between implicit Adams method (for non-stiff
problems) and a method based on backward differentiation formulas (BDF)
(for stiff problems).
Source: http://www.netlib.org/odepack
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "lsoda" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j].
- with_jacobian : bool
*Not used.*
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- max_order_ns : int
Maximum order used in the nonstiff case (default 12).
- max_order_s : int
Maximum order used in the stiff case (default 5).
- max_hnil : int
Maximum number of messages reporting too small step size (t + h = t)
(default 0)
- ixpr : int
Whether to generate extra printing at method switches (default False).
"dopri5"
        This is an explicit Runge-Kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
- verbosity : int
Switch for printing messages (< 0 for no messages).
"dop853"
        This is an explicit Runge-Kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
>>> def f(t, y, arg1):
... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
>>> def jac(t, y, arg1):
... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf')
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
... print(r.t+dt, r.integrate(r.t+dt))
1 [-0.71038232+0.23749653j 0.40000271+0.j ]
2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
6.0 [0.58643071+0.339819j 0.08000018+0.j ]
7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
self.stiff = 0
self.f = f
self.jac = jac
self.f_params = ()
self.jac_params = ()
self._y = []
@property
def y(self):
return self._y
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
if isscalar(y):
y = [y]
n_prev = len(self._y)
if not n_prev:
self.set_integrator('') # find first available integrator
self._y = asarray(y, self._integrator.scalar)
self.t = t
self._integrator.reset(len(self._y), self.jac is not None)
return self
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator.
**integrator_params
Additional parameters for the integrator.
"""
integrator = find_integrator(name)
if integrator is None:
            # FIXME: this really should raise an exception. Will that break
            # any code?
warnings.warn('No integrator name match with %r or is not '
'available.' % name)
else:
self._integrator = integrator(**integrator_params)
if not len(self._y):
self.t = 0.0
self._y = array([0.0], self._integrator.scalar)
self._integrator.reset(len(self._y), self.jac is not None)
return self
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
if step and self._integrator.supports_step:
mth = self._integrator.step
elif relax and self._integrator.supports_run_relax:
mth = self._integrator.run_relax
else:
mth = self._integrator.run
try:
self._y, self.t = mth(self.f, self.jac or (lambda: None),
self._y, self.t, t,
self.f_params, self.jac_params)
except SystemError as e:
# f2py issue with tuple returns, see ticket 1187.
raise ValueError(
'Function to integrate must not return a tuple.'
) from e
return self._y
def successful(self):
"""Check if integration was successful."""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.success == 1
def get_return_code(self):
"""Extracts the return code for the integration to enable better control
if the integration fails.
In general, a return code > 0 implies success, while a return code < 0
implies failure.
Notes
-----
This section describes possible return codes and their meaning, for available
integrators that can be selected by `set_integrator` method.
"vode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"zvode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"dopri5"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"dop853"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"lsoda"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call (perhaps wrong Dfun type).
-2 Excess accuracy requested (tolerances too small).
-3 Illegal input detected (internal error).
-4 Repeated error test failures (internal error).
-5 Repeated convergence failures (perhaps bad Jacobian or tolerances).
-6 Error weight became zero during problem.
-7 Internal workspace insufficient to finish (internal error).
=========== =======
"""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.istate
def set_f_params(self, *args):
"""Set extra parameters for user-supplied function f."""
self.f_params = args
return self
def set_jac_params(self, *args):
"""Set extra parameters for user-supplied function jac."""
self.jac_params = args
return self
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
``solout(t, y)`` is called at each internal integrator step,
t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
solout should return -1 to stop integration
otherwise it should return None or 0
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout)
if self._y is not None:
self._integrator.reset(len(self._y), self.jac is not None)
else:
raise ValueError("selected integrator does not support solout,"
" choose another one")
def _transform_banded_jac(bjac):
"""
Convert a real matrix of the form (for example)
        [0 0 A B]        [0 0 0 B]
        [0 0 C D]        [0 0 A D]
        [E F G H]   to   [0 F C H]
        [I J K L]        [E J G L]
                         [I 0 K 0]
That is, every other column is shifted up one.
"""
# Shift every other column.
newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
newjac[1:, ::2] = bjac[:, ::2]
newjac[:-1, 1::2] = bjac[:, 1::2]
return newjac
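# Illustrative sketch of the shift above (the 2x2 input is made up for the
# example): _transform_banded_jac(array([[1., 2.], [3., 4.]])) returns
#
#     [[0., 2.],
#      [1., 4.],
#      [3., 0.]]
#
# i.e. the result gains one row and each even-indexed column sits one row
# lower than the odd-indexed column next to it, which matches the bandwidth
# adjustment done in complex_ode.set_integrator below.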
class complex_ode(ode):
"""
A wrapper of ode for complex systems.
This functions similarly as `ode`, but re-maps a complex-valued
equation system to a real-valued one before using the integrators.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
jac : callable ``jac(t, y, *jac_args)``
Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_f_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Examples
--------
For usage examples, see `ode`.
"""
def __init__(self, f, jac=None):
self.cf = f
self.cjac = jac
if jac is None:
ode.__init__(self, self._wrap, None)
else:
ode.__init__(self, self._wrap, self._wrap_jac)
def _wrap(self, t, y, *f_args):
f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
# self.tmp is a real-valued array containing the interleaved
# real and imaginary parts of f.
self.tmp[::2] = real(f)
self.tmp[1::2] = imag(f)
return self.tmp
def _wrap_jac(self, t, y, *jac_args):
# jac is the complex Jacobian computed by the user-defined function.
jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
# jac_tmp is the real version of the complex Jacobian. Each complex
# entry in jac, say 2+3j, becomes a 2x2 block of the form
# [2 -3]
# [3 2]
jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
jac_tmp[1::2, ::2] = imag(jac)
jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
ml = getattr(self._integrator, 'ml', None)
mu = getattr(self._integrator, 'mu', None)
if ml is not None or mu is not None:
# Jacobian is banded. The user's Jacobian function has computed
# the complex Jacobian in packed format. The corresponding
# real-valued version has every other column shifted up.
jac_tmp = _transform_banded_jac(jac_tmp)
return jac_tmp
@property
def y(self):
return self._y[::2] + 1j * self._y[1::2]
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator
**integrator_params
Additional parameters for the integrator.
"""
if name == 'zvode':
raise ValueError("zvode must be used with ode, not complex_ode")
lband = integrator_params.get('lband')
uband = integrator_params.get('uband')
if lband is not None or uband is not None:
# The Jacobian is banded. Override the user-supplied bandwidths
# (which are for the complex Jacobian) with the bandwidths of
# the corresponding real-valued Jacobian wrapper of the complex
# Jacobian.
integrator_params['lband'] = 2 * (lband or 0) + 1
integrator_params['uband'] = 2 * (uband or 0) + 1
return ode.set_integrator(self, name, **integrator_params)
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
y = asarray(y)
self.tmp = zeros(y.size * 2, 'float')
self.tmp[::2] = real(y)
self.tmp[1::2] = imag(y)
return ode.set_initial_value(self, self.tmp, t)
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
y = ode.integrate(self, t, step, relax)
return y[::2] + 1j * y[1::2]
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
``solout(t, y)`` is called at each internal integrator step,
t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
solout should return -1 to stop integration
otherwise it should return None or 0
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout, complex=True)
else:
raise TypeError("selected integrator does not support solouta,"
+ "choose another one")
# ------------------------------------------------------------------------------
# ODE integrators
# ------------------------------------------------------------------------------
def find_integrator(name):
for cl in IntegratorBase.integrator_classes:
if re.match(name, cl.__name__, re.I):
return cl
return None
class IntegratorConcurrencyError(RuntimeError):
"""
Failure due to concurrent usage of an integrator that can be used
only for a single problem at a time.
"""
def __init__(self, name):
msg = ("Integrator `%s` can be used to solve only a single problem "
"at a time. If you want to integrate multiple problems, "
"consider using a different integrator "
"(see `ode.set_integrator`)") % name
RuntimeError.__init__(self, msg)
class IntegratorBase:
runner = None # runner is None => integrator is not available
success = None # success==1 if integrator was called successfully
istate = None # istate > 0 means success, istate < 0 means failure
supports_run_relax = None
supports_step = None
supports_solout = False
integrator_classes = []
scalar = float
def acquire_new_handle(self):
# Some of the integrators have internal state (ancient
# Fortran...), and so only one instance can use them at a time.
# We keep track of this, and fail when concurrent usage is tried.
self.__class__.active_global_handle += 1
self.handle = self.__class__.active_global_handle
def check_handle(self):
if self.handle is not self.__class__.active_global_handle:
raise IntegratorConcurrencyError(self.__class__.__name__)
def reset(self, n, has_jac):
"""Prepare integrator for call: allocate memory, set flags, etc.
n - number of equations.
has_jac - if user has supplied function for evaluating Jacobian.
"""
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
Return 2-tuple (y1,t1) where y1 is the result and t=t1
defines the stoppage coordinate of the result.
"""
raise NotImplementedError('all integrators must define '
                                  'run(f, jac, y0, t0, t1, f_params, jac_params)')
def step(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Make one integration step and return (y1,t1)."""
raise NotImplementedError('%s does not support step() method' %
self.__class__.__name__)
def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t>=t1 and return (y1,t)."""
raise NotImplementedError('%s does not support run_relax() method' %
self.__class__.__name__)
# XXX: __str__ method for getting visual state of the integrator
def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
"""
Wrap a banded Jacobian function with a function that pads
the Jacobian with `ml` rows of zeros.
"""
def jac_wrapper(t, y):
jac = asarray(jacfunc(t, y, *jac_params))
padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
return padded_jac
return jac_wrapper
class vode(IntegratorBase):
runner = getattr(_vode, 'dvode', None)
messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
-2: 'Excess accuracy requested. (Tolerances too small.)',
-3: 'Illegal input detected. (See printed message.)',
-4: 'Repeated error test failures. (Check all input.)',
-5: 'Repeated convergence failures. (Perhaps bad'
' Jacobian supplied or wrong choice of MF or tolerances.)',
-6: 'Error weight became zero during problem. (Solution'
' component i vanished, and ATOL or ATOL(i) = 0.)'
}
supports_run_relax = 1
supports_step = 1
active_global_handle = 0
def __init__(self,
method='adams',
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
order=12,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
):
if re.match(method, r'adams', re.I):
self.meth = 1
elif re.match(method, r'bdf', re.I):
self.meth = 2
else:
raise ValueError('Unknown integration method %s' % method)
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.order = order
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.success = 1
self.initialized = False
def _determine_mf_and_set_bands(self, has_jac):
"""
Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
In the Fortran code, the legal values of `MF` are:
10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
-11, -12, -14, -15, -21, -22, -24, -25
but this Python wrapper does not use negative values.
Returns
mf = 10*self.meth + miter
self.meth is the linear multistep method:
self.meth == 1: method="adams"
self.meth == 2: method="bdf"
miter is the correction iteration method:
            miter == 0: Functional iteration; no Jacobian involved.
miter == 1: Chord iteration with user-supplied full Jacobian.
miter == 2: Chord iteration with internally computed full Jacobian.
miter == 3: Chord iteration with internally computed diagonal Jacobian.
miter == 4: Chord iteration with user-supplied banded Jacobian.
miter == 5: Chord iteration with internally computed banded Jacobian.
Side effects: If either self.mu or self.ml is not None and the other is None,
then the one that is None is set to 0.
"""
jac_is_banded = self.mu is not None or self.ml is not None
if jac_is_banded:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
# has_jac is True if the user provided a Jacobian function.
if has_jac:
if jac_is_banded:
miter = 4
else:
miter = 1
else:
if jac_is_banded:
if self.ml == self.mu == 0:
miter = 3 # Chord iteration with internal diagonal Jacobian.
else:
miter = 5 # Chord iteration with internal banded Jacobian.
else:
# self.with_jacobian is set by the user in the call to ode.set_integrator.
if self.with_jacobian:
miter = 2 # Chord iteration with internal full Jacobian.
else:
                    miter = 0 # Functional iteration; no Jacobian involved.
mf = 10 * self.meth + miter
return mf
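    # Worked examples of the mapping above: method='adams' (meth=1) with no
    # Jacobian information at all gives mf = 10*1 + 0 = 10 (functional
    # iteration), while method='bdf' (meth=2) with a user-supplied full
    # Jacobian gives mf = 10*2 + 1 = 21.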
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf == 10:
lrw = 20 + 16 * n
elif mf in [11, 12]:
lrw = 22 + 16 * n + 2 * n * n
elif mf == 13:
lrw = 22 + 17 * n
elif mf in [14, 15]:
lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
elif mf == 20:
lrw = 20 + 9 * n
elif mf in [21, 22]:
lrw = 22 + 9 * n + 2 * n * n
elif mf == 23:
lrw = 22 + 10 * n
elif mf in [24, 25]:
lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
else:
raise ValueError('Unexpected mf=%s' % mf)
if mf % 10 in [0, 3]:
liw = 30
else:
liw = 30 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), _vode_int_dtype)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
if self.ml is not None and self.ml > 0:
# Banded Jacobian. Wrap the user-provided function with one
# that pads the Jacobian array with the extra `self.ml` rows
# required by the f2py-generated wrapper.
jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
(f_params, jac_params))
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if vode.runner is not None:
IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
runner = getattr(_vode, 'zvode', None)
supports_run_relax = 1
supports_step = 1
scalar = complex
active_global_handle = 0
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf in (10,):
lzw = 15 * n
elif mf in (11, 12):
lzw = 15 * n + 2 * n ** 2
elif mf in (-11, -12):
lzw = 15 * n + n ** 2
elif mf in (13,):
lzw = 16 * n
elif mf in (14, 15):
lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-14, -15):
lzw = 16 * n + (2 * self.ml + self.mu) * n
elif mf in (20,):
lzw = 8 * n
elif mf in (21, 22):
lzw = 8 * n + 2 * n ** 2
elif mf in (-21, -22):
lzw = 8 * n + n ** 2
elif mf in (23,):
lzw = 9 * n
elif mf in (24, 25):
lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-24, -25):
lzw = 9 * n + (2 * self.ml + self.mu) * n
lrw = 20 + n
if mf % 10 in (0, 3):
liw = 30
else:
liw = 30 + n
zwork = zeros((lzw,), complex)
self.zwork = zwork
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), _vode_int_dtype)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.zwork, self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
if zvode.runner is not None:
IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
supports_solout = True
messages = {1: 'computation successful',
2: 'computation successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nsteps is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.verbosity = verbosity
self.success = 1
self.set_solout(None)
def set_solout(self, solout, complex=False):
self.solout = solout
self.solout_cmplx = complex
if solout is None:
self.iout = 0
else:
self.iout = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), _dop_int_dtype)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
return y, x
def _solout(self, nr, xold, x, y, nd, icomp, con):
if self.solout is not None:
if self.solout_cmplx:
y = y[::2] + 1j * y[1::2]
return self.solout(x, y)
else:
return 1
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
runner = getattr(_dop, 'dop853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
ifactor, dfactor, beta, method, verbosity)
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), _dop_int_dtype)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
runner = getattr(_lsoda, 'lsoda', None)
active_global_handle = 0
messages = {
2: "Integration successful.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def __init__(self,
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
ixpr=0,
max_hnil=0,
max_order_ns=12,
max_order_s=5,
method=None
):
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.max_order_ns = max_order_ns
self.max_order_s = max_order_s
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.ixpr = ixpr
self.max_hnil = max_hnil
self.success = 1
self.initialized = False
def reset(self, n, has_jac):
# Calculate parameters for Fortran subroutine dvode.
if has_jac:
if self.mu is None and self.ml is None:
jt = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 4
else:
if self.mu is None and self.ml is None:
jt = 2
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 5
lrn = 20 + (self.max_order_ns + 4) * n
if jt in [1, 2]:
lrs = 22 + (self.max_order_s + 4) * n + n * n
elif jt in [4, 5]:
lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
else:
raise ValueError('Unexpected jt=%s' % jt)
lrw = max(lrn, lrs)
liw = 20 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), _lsoda_int_dtype)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.ixpr
iwork[5] = self.nsteps
iwork[6] = self.max_hnil
iwork[7] = self.max_order_ns
iwork[8] = self.max_order_s
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, jt]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
args = [f, y0, t0, t1] + self.call_args[:-1] + \
[jac, self.call_args[-1], f_params, 0, jac_params]
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if lsoda.runner:
IntegratorBase.integrator_classes.append(lsoda)
| bsd-3-clause | 0a87c769d5051abd08ae0ccd8697b270 | 33.945335 | 90 | 0.5416 | 3.790119 | false | false | false | false |
scipy/scipy | scipy/io/wavfile.py | 10 | 26642 | """
Module to read / write wav files using NumPy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a NumPy array as a WAV file.
"""
import io
import sys
import numpy
import struct
import warnings
from enum import IntEnum
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
pass
class WAVE_FORMAT(IntEnum):
"""
WAVE form wFormatTag IDs
Complete list is in mmreg.h in Windows 10 SDK. ALAC and OPUS are the
newest additions, in v10.0.14393 2016-07
"""
UNKNOWN = 0x0000
PCM = 0x0001
ADPCM = 0x0002
IEEE_FLOAT = 0x0003
VSELP = 0x0004
IBM_CVSD = 0x0005
ALAW = 0x0006
MULAW = 0x0007
DTS = 0x0008
DRM = 0x0009
WMAVOICE9 = 0x000A
WMAVOICE10 = 0x000B
OKI_ADPCM = 0x0010
DVI_ADPCM = 0x0011
IMA_ADPCM = 0x0011 # Duplicate
MEDIASPACE_ADPCM = 0x0012
SIERRA_ADPCM = 0x0013
G723_ADPCM = 0x0014
DIGISTD = 0x0015
DIGIFIX = 0x0016
DIALOGIC_OKI_ADPCM = 0x0017
MEDIAVISION_ADPCM = 0x0018
CU_CODEC = 0x0019
HP_DYN_VOICE = 0x001A
YAMAHA_ADPCM = 0x0020
SONARC = 0x0021
DSPGROUP_TRUESPEECH = 0x0022
ECHOSC1 = 0x0023
AUDIOFILE_AF36 = 0x0024
APTX = 0x0025
AUDIOFILE_AF10 = 0x0026
PROSODY_1612 = 0x0027
LRC = 0x0028
DOLBY_AC2 = 0x0030
GSM610 = 0x0031
MSNAUDIO = 0x0032
ANTEX_ADPCME = 0x0033
CONTROL_RES_VQLPC = 0x0034
DIGIREAL = 0x0035
DIGIADPCM = 0x0036
CONTROL_RES_CR10 = 0x0037
NMS_VBXADPCM = 0x0038
CS_IMAADPCM = 0x0039
ECHOSC3 = 0x003A
ROCKWELL_ADPCM = 0x003B
ROCKWELL_DIGITALK = 0x003C
XEBEC = 0x003D
G721_ADPCM = 0x0040
G728_CELP = 0x0041
MSG723 = 0x0042
INTEL_G723_1 = 0x0043
INTEL_G729 = 0x0044
SHARP_G726 = 0x0045
MPEG = 0x0050
RT24 = 0x0052
PAC = 0x0053
MPEGLAYER3 = 0x0055
LUCENT_G723 = 0x0059
CIRRUS = 0x0060
ESPCM = 0x0061
VOXWARE = 0x0062
CANOPUS_ATRAC = 0x0063
G726_ADPCM = 0x0064
G722_ADPCM = 0x0065
DSAT = 0x0066
DSAT_DISPLAY = 0x0067
VOXWARE_BYTE_ALIGNED = 0x0069
VOXWARE_AC8 = 0x0070
VOXWARE_AC10 = 0x0071
VOXWARE_AC16 = 0x0072
VOXWARE_AC20 = 0x0073
VOXWARE_RT24 = 0x0074
VOXWARE_RT29 = 0x0075
VOXWARE_RT29HW = 0x0076
VOXWARE_VR12 = 0x0077
VOXWARE_VR18 = 0x0078
VOXWARE_TQ40 = 0x0079
VOXWARE_SC3 = 0x007A
VOXWARE_SC3_1 = 0x007B
SOFTSOUND = 0x0080
VOXWARE_TQ60 = 0x0081
MSRT24 = 0x0082
G729A = 0x0083
MVI_MVI2 = 0x0084
DF_G726 = 0x0085
DF_GSM610 = 0x0086
ISIAUDIO = 0x0088
ONLIVE = 0x0089
MULTITUDE_FT_SX20 = 0x008A
INFOCOM_ITS_G721_ADPCM = 0x008B
CONVEDIA_G729 = 0x008C
CONGRUENCY = 0x008D
SBC24 = 0x0091
DOLBY_AC3_SPDIF = 0x0092
MEDIASONIC_G723 = 0x0093
PROSODY_8KBPS = 0x0094
ZYXEL_ADPCM = 0x0097
PHILIPS_LPCBB = 0x0098
PACKED = 0x0099
MALDEN_PHONYTALK = 0x00A0
RACAL_RECORDER_GSM = 0x00A1
RACAL_RECORDER_G720_A = 0x00A2
RACAL_RECORDER_G723_1 = 0x00A3
RACAL_RECORDER_TETRA_ACELP = 0x00A4
NEC_AAC = 0x00B0
RAW_AAC1 = 0x00FF
RHETOREX_ADPCM = 0x0100
IRAT = 0x0101
VIVO_G723 = 0x0111
VIVO_SIREN = 0x0112
PHILIPS_CELP = 0x0120
PHILIPS_GRUNDIG = 0x0121
DIGITAL_G723 = 0x0123
SANYO_LD_ADPCM = 0x0125
SIPROLAB_ACEPLNET = 0x0130
SIPROLAB_ACELP4800 = 0x0131
SIPROLAB_ACELP8V3 = 0x0132
SIPROLAB_G729 = 0x0133
SIPROLAB_G729A = 0x0134
SIPROLAB_KELVIN = 0x0135
VOICEAGE_AMR = 0x0136
G726ADPCM = 0x0140
DICTAPHONE_CELP68 = 0x0141
DICTAPHONE_CELP54 = 0x0142
QUALCOMM_PUREVOICE = 0x0150
QUALCOMM_HALFRATE = 0x0151
TUBGSM = 0x0155
MSAUDIO1 = 0x0160
WMAUDIO2 = 0x0161
WMAUDIO3 = 0x0162
WMAUDIO_LOSSLESS = 0x0163
WMASPDIF = 0x0164
UNISYS_NAP_ADPCM = 0x0170
UNISYS_NAP_ULAW = 0x0171
UNISYS_NAP_ALAW = 0x0172
UNISYS_NAP_16K = 0x0173
SYCOM_ACM_SYC008 = 0x0174
SYCOM_ACM_SYC701_G726L = 0x0175
SYCOM_ACM_SYC701_CELP54 = 0x0176
SYCOM_ACM_SYC701_CELP68 = 0x0177
KNOWLEDGE_ADVENTURE_ADPCM = 0x0178
FRAUNHOFER_IIS_MPEG2_AAC = 0x0180
DTS_DS = 0x0190
CREATIVE_ADPCM = 0x0200
CREATIVE_FASTSPEECH8 = 0x0202
CREATIVE_FASTSPEECH10 = 0x0203
UHER_ADPCM = 0x0210
ULEAD_DV_AUDIO = 0x0215
ULEAD_DV_AUDIO_1 = 0x0216
QUARTERDECK = 0x0220
ILINK_VC = 0x0230
RAW_SPORT = 0x0240
ESST_AC3 = 0x0241
GENERIC_PASSTHRU = 0x0249
IPI_HSX = 0x0250
IPI_RPELP = 0x0251
CS2 = 0x0260
SONY_SCX = 0x0270
SONY_SCY = 0x0271
SONY_ATRAC3 = 0x0272
SONY_SPC = 0x0273
TELUM_AUDIO = 0x0280
TELUM_IA_AUDIO = 0x0281
NORCOM_VOICE_SYSTEMS_ADPCM = 0x0285
FM_TOWNS_SND = 0x0300
MICRONAS = 0x0350
MICRONAS_CELP833 = 0x0351
BTV_DIGITAL = 0x0400
INTEL_MUSIC_CODER = 0x0401
INDEO_AUDIO = 0x0402
QDESIGN_MUSIC = 0x0450
ON2_VP7_AUDIO = 0x0500
ON2_VP6_AUDIO = 0x0501
VME_VMPCM = 0x0680
TPC = 0x0681
LIGHTWAVE_LOSSLESS = 0x08AE
OLIGSM = 0x1000
OLIADPCM = 0x1001
OLICELP = 0x1002
OLISBC = 0x1003
OLIOPR = 0x1004
LH_CODEC = 0x1100
LH_CODEC_CELP = 0x1101
LH_CODEC_SBC8 = 0x1102
LH_CODEC_SBC12 = 0x1103
LH_CODEC_SBC16 = 0x1104
NORRIS = 0x1400
ISIAUDIO_2 = 0x1401
SOUNDSPACE_MUSICOMPRESS = 0x1500
MPEG_ADTS_AAC = 0x1600
MPEG_RAW_AAC = 0x1601
MPEG_LOAS = 0x1602
NOKIA_MPEG_ADTS_AAC = 0x1608
NOKIA_MPEG_RAW_AAC = 0x1609
VODAFONE_MPEG_ADTS_AAC = 0x160A
VODAFONE_MPEG_RAW_AAC = 0x160B
MPEG_HEAAC = 0x1610
VOXWARE_RT24_SPEECH = 0x181C
SONICFOUNDRY_LOSSLESS = 0x1971
INNINGS_TELECOM_ADPCM = 0x1979
LUCENT_SX8300P = 0x1C07
LUCENT_SX5363S = 0x1C0C
CUSEEME = 0x1F03
NTCSOFT_ALF2CM_ACM = 0x1FC4
DVM = 0x2000
DTS2 = 0x2001
MAKEAVIS = 0x3313
DIVIO_MPEG4_AAC = 0x4143
NOKIA_ADAPTIVE_MULTIRATE = 0x4201
DIVIO_G726 = 0x4243
LEAD_SPEECH = 0x434C
LEAD_VORBIS = 0x564C
WAVPACK_AUDIO = 0x5756
OGG_VORBIS_MODE_1 = 0x674F
OGG_VORBIS_MODE_2 = 0x6750
OGG_VORBIS_MODE_3 = 0x6751
OGG_VORBIS_MODE_1_PLUS = 0x676F
OGG_VORBIS_MODE_2_PLUS = 0x6770
OGG_VORBIS_MODE_3_PLUS = 0x6771
ALAC = 0x6C61
_3COM_NBX = 0x7000 # Can't have leading digit
OPUS = 0x704F
FAAD_AAC = 0x706D
AMR_NB = 0x7361
AMR_WB = 0x7362
AMR_WP = 0x7363
GSM_AMR_CBR = 0x7A21
GSM_AMR_VBR_SID = 0x7A22
COMVERSE_INFOSYS_G723_1 = 0xA100
COMVERSE_INFOSYS_AVQSBC = 0xA101
COMVERSE_INFOSYS_SBC = 0xA102
SYMBOL_G729_A = 0xA103
VOICEAGE_AMR_WB = 0xA104
INGENIENT_G726 = 0xA105
MPEG4_AAC = 0xA106
ENCORE_G726 = 0xA107
ZOLL_ASAO = 0xA108
SPEEX_VOICE = 0xA109
VIANIX_MASC = 0xA10A
WM9_SPECTRUM_ANALYZER = 0xA10B
WMF_SPECTRUM_ANAYZER = 0xA10C
GSM_610 = 0xA10D
GSM_620 = 0xA10E
GSM_660 = 0xA10F
GSM_690 = 0xA110
GSM_ADAPTIVE_MULTIRATE_WB = 0xA111
POLYCOM_G722 = 0xA112
POLYCOM_G728 = 0xA113
POLYCOM_G729_A = 0xA114
POLYCOM_SIREN = 0xA115
GLOBAL_IP_ILBC = 0xA116
RADIOTIME_TIME_SHIFT_RADIO = 0xA117
NICE_ACA = 0xA118
NICE_ADPCM = 0xA119
VOCORD_G721 = 0xA11A
VOCORD_G726 = 0xA11B
VOCORD_G722_1 = 0xA11C
VOCORD_G728 = 0xA11D
VOCORD_G729 = 0xA11E
VOCORD_G729_A = 0xA11F
VOCORD_G723_1 = 0xA120
VOCORD_LBC = 0xA121
NICE_G728 = 0xA122
FRACE_TELECOM_G729 = 0xA123
CODIAN = 0xA124
FLAC = 0xF1AC
EXTENSIBLE = 0xFFFE
DEVELOPMENT = 0xFFFF
KNOWN_WAVE_FORMATS = {WAVE_FORMAT.PCM, WAVE_FORMAT.IEEE_FLOAT}
def _raise_bad_format(format_tag):
try:
format_name = WAVE_FORMAT(format_tag).name
except ValueError:
format_name = f'{format_tag:#06x}'
raise ValueError(f"Unknown wave file format: {format_name}. Supported "
"formats: " +
', '.join(x.name for x in KNOWN_WAVE_FORMATS))
def _read_fmt_chunk(fid, is_big_endian):
"""
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
Notes
-----
Assumes file pointer is immediately after the 'fmt ' id
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = struct.unpack(fmt+'I', fid.read(4))[0]
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read = 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT.EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order,
# rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
_raise_bad_format(format_tag)
# move file pointer to next chunk
if size > bytes_read:
fid.read(size - bytes_read)
# fmt should always be 16, 18 or 40, but handle it just in case
_handle_pad_byte(fid, size)
if format_tag == WAVE_FORMAT.PCM:
if bytes_per_second != fs * block_align:
raise ValueError("WAV header is invalid: nAvgBytesPerSec must"
" equal product of nSamplesPerSec and"
" nBlockAlign, but file has nSamplesPerSec ="
f" {fs}, nBlockAlign = {block_align}, and"
f" nAvgBytesPerSec = {bytes_per_second}")
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
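# As an illustration of the returned tuple: a plain 16-bit stereo PCM file at
# 44100 Hz with the minimal 16-byte fmt chunk would yield
# (16, WAVE_FORMAT.PCM, 2, 44100, 176400, 4, 16), since
# block_align = channels * bytes_per_sample = 2 * 2 and
# bytes_per_second = fs * block_align = 44100 * 4.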
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
block_align, mmap=False):
"""
Notes
-----
Assumes file pointer is immediately after the 'data' id
It's possible to not use all available bits in a container, or to store
samples in a container bigger than necessary, so bytes_per_sample uses
the actual reported container size (nBlockAlign / nChannels). Real-world
examples:
Adobe Audition's "24-bit packed int (type 1, 20-bit)"
nChannels = 2, nBlockAlign = 6, wBitsPerSample = 20
http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Samples/AFsp/M1F1-int12-AFsp.wav
is:
nChannels = 2, nBlockAlign = 4, wBitsPerSample = 12
http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/multichaudP.pdf
gives an example of:
nChannels = 2, nBlockAlign = 8, wBitsPerSample = 20
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
# Size of the data subchunk in bytes
size = struct.unpack(fmt+'I', fid.read(4))[0]
# Number of bytes per sample (sample container size)
bytes_per_sample = block_align // channels
n_samples = size // bytes_per_sample
if format_tag == WAVE_FORMAT.PCM:
if 1 <= bit_depth <= 8:
            dtype = 'u1' # 8-bit or lower integer WAV data is unsigned
elif bytes_per_sample in {3, 5, 6, 7}:
# No compatible dtype. Load as raw bytes for reshaping later.
dtype = 'V1'
elif bit_depth <= 64:
# Remaining bit depths can map directly to signed numpy dtypes
dtype = f'{fmt}i{bytes_per_sample}'
else:
raise ValueError("Unsupported bit depth: the WAV file "
f"has {bit_depth}-bit integer data.")
elif format_tag == WAVE_FORMAT.IEEE_FLOAT:
if bit_depth in {32, 64}:
dtype = f'{fmt}f{bytes_per_sample}'
else:
raise ValueError("Unsupported bit depth: the WAV file "
f"has {bit_depth}-bit floating-point data.")
else:
_raise_bad_format(format_tag)
start = fid.tell()
if not mmap:
try:
count = size if dtype == 'V1' else n_samples
data = numpy.fromfile(fid, dtype=dtype, count=count)
except io.UnsupportedOperation: # not a C-like file
fid.seek(start, 0) # just in case it seeked, though it shouldn't
data = numpy.frombuffer(fid.read(size), dtype=dtype)
if dtype == 'V1':
# Rearrange raw bytes into smallest compatible numpy dtype
dt = f'{fmt}i4' if bytes_per_sample == 3 else f'{fmt}i8'
a = numpy.zeros((len(data) // bytes_per_sample, numpy.dtype(dt).itemsize),
dtype='V1')
if is_big_endian:
a[:, :bytes_per_sample] = data.reshape((-1, bytes_per_sample))
else:
a[:, -bytes_per_sample:] = data.reshape((-1, bytes_per_sample))
data = a.view(dt).reshape(a.shape[:-1])
else:
if bytes_per_sample in {1, 2, 4, 8}:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(n_samples,))
fid.seek(start + size)
else:
raise ValueError("mmap=True not compatible with "
f"{bytes_per_sample}-byte container size.")
_handle_pad_byte(fid, size)
if channels > 1:
data = data.reshape(-1, channels)
return data
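# Illustration of the 'V1' repacking above for 24-bit little-endian PCM: the
# 3-byte sample b'\x56\x34\x12' ends up in the top three bytes of an int32,
# giving 0x12345600, i.e. left-justified data with a zero least significant
# byte, as described in the `read` docstring below.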
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
    # Call unpack() and seek() only if we have actually read data from the
    # file; otherwise an empty read at the end of the file would trigger an
    # unnecessary exception at the unpack() call. If the read returned
    # nothing, there is no need for seek() anyway.
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
_handle_pad_byte(fid, size)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError(f"File format {repr(str1)} not understood. Only "
"'RIFF' and 'RIFX' supported.")
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError(f"Not a WAV file. RIFF form type is {repr(str2)}.")
return file_size, is_big_endian
def _handle_pad_byte(fid, size):
# "If the chunk size is an odd number of bytes, a pad byte with value zero
# is written after ckData." So we need to seek past this after each chunk.
if size % 2:
fid.seek(1, 1)
def read(filename, mmap=False):
"""
Open a WAV file.
Return the sample rate (in samples/sec) and data from an LPCM WAV file.
Parameters
----------
filename : string or open file handle
Input WAV file.
mmap : bool, optional
Whether to read data as memory-mapped (default: False). Not compatible
with some bit depths; see Notes. Only to be used on real files.
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of WAV file.
data : numpy array
Data read from WAV file. Data-type is determined from the file;
see Notes. Data is 1-D for 1-channel WAV, or 2-D of shape
(Nsamples, Nchannels) otherwise. If a file-like input without a
C-like file descriptor (e.g., :class:`python:io.BytesIO`) is
passed, this will not be writeable.
Notes
-----
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit integer PCM -2147483648 +2147483647 int32
24-bit integer PCM -2147483648 +2147483392 int32
16-bit integer PCM -32768 +32767 int16
8-bit integer PCM 0 255 uint8
===================== =========== =========== =============
WAV files can specify arbitrary bit depth, and this function supports
reading any integer PCM depth from 1 to 64 bits. Data is returned in the
smallest compatible numpy int type, in left-justified format. 8-bit and
lower is unsigned, while 9-bit and higher is signed.
For example, 24-bit data will be stored as int32, with the MSB of the
24-bit data stored at the MSB of the int32, and typically the least
significant byte is 0x00. (However, if a file actually contains data past
its specified bit depth, those bits will be read and output, too. [2]_)
This bit justification and sign matches WAV's native internal format, which
allows memory mapping of WAV files that use 1, 2, 4, or 8 bytes per sample
(so 24-bit files cannot be memory-mapped, but 32-bit can).
IEEE float PCM in 32- or 64-bit format is supported, with or without mmap.
Values exceeding [-1, +1] are not clipped.
Non-linear PCM (mu-law, A-law) is not supported.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
.. [2] Adobe Systems Incorporated, "Adobe Audition 3 User Guide", section
"Audio file formats: 24-bit Packed Int (type 1, 20-bit)", 2007
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> from scipy.io import wavfile
>>> import scipy.io
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(scipy.io.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if not chunk_id:
if data_chunk_received:
# End of file but data successfully read
warnings.warn(
"Reached EOF prematurely; finished at {:d} bytes, "
"expected {:d} bytes from header."
.format(fid.tell(), file_size),
WavFileWarning, stacklevel=2)
break
else:
raise ValueError("Unexpected end of file.")
elif len(chunk_id) < 4:
msg = f"Incomplete chunk ID: {repr(chunk_id)}"
# If we have the data, ignore the broken chunk
if fmt_chunk_received and data_chunk_received:
warnings.warn(msg + ", ignoring it.", WavFileWarning,
stacklevel=2)
else:
raise ValueError(msg)
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
block_align = fmt_chunk[5]
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
data_chunk_received = True
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, block_align, mmap)
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in {b'JUNK', b'Fake'}:
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
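# Hedged usage sketch (editor addition): contrasts an ordinary read with a
# memory-mapped read of the same file. 'example.wav' is a placeholder name for
# any on-disk PCM file whose container size is 1, 2, 4 or 8 bytes per sample.
def _demo_read_mmap():  # pragma: no cover
    rate, data = read('example.wav')                  # array fully in memory
    rate_m, data_m = read('example.wav', mmap=True)   # copy-on-write memmap
    assert rate == rate_m
    assert numpy.array_equal(data, data_m)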
def write(filename, rate, data):
"""
Write a NumPy array as a WAV file.
Parameters
----------
filename : string or open file handle
Output wav file.
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D NumPy array of either integer or float data-type.
Notes
-----
* Writes a simple uncompressed WAV file.
* To write multiple-channels, use a 2-D array of shape
(Nsamples, Nchannels).
* The bits-per-sample and PCM/float will be determined by the data-type.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
Create a 100Hz sine wave, sampled at 44100Hz.
Write to 16-bit PCM, Mono.
>>> from scipy.io.wavfile import write
>>> import numpy as np
>>> samplerate = 44100; fs = 100
>>> t = np.linspace(0., 1., samplerate)
>>> amplitude = np.iinfo(np.int16).max
>>> data = amplitude * np.sin(2. * np.pi * fs * t)
>>> write("example.wav", samplerate, data.astype(np.int16))
"""
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
data.dtype.itemsize == 1)):
raise ValueError("Unsupported data type '%s'" % data.dtype)
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
# fmt chunk
header_data += b'fmt '
if dkind == 'f':
format_tag = WAVE_FORMAT.IEEE_FLOAT
else:
format_tag = WAVE_FORMAT.PCM
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
bit_depth = data.dtype.itemsize * 8
bytes_per_second = fs*(bit_depth // 8)*channels
block_align = channels * (bit_depth // 8)
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
bytes_per_second, block_align, bit_depth)
if not (dkind == 'i' or dkind == 'u'):
# add cbSize field for non-PCM files
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# fact chunk (non-PCM files)
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
# check data size (needs to be immediately before the data chunk)
if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
raise ValueError("Data exceeds wave file size limit")
fid.write(header_data)
# data chunk
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
# Determine file size and place it in correct
# position at start of the file.
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
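# Hedged usage sketch (editor addition): writes a two-channel 32-bit float WAV.
# The 440/554 Hz tones and the file name 'stereo.wav' are illustrative choices.
def _demo_write_stereo():  # pragma: no cover
    rate = 44100
    t = numpy.linspace(0., 1., rate, endpoint=False)
    left = 0.5 * numpy.sin(2 * numpy.pi * 440.0 * t)
    right = 0.5 * numpy.sin(2 * numpy.pi * 554.4 * t)
    stereo = numpy.column_stack([left, right]).astype(numpy.float32)
    write('stereo.wav', rate, stereo)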
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
| bsd-3-clause | 52c5a471cbaafb1694583f00c381982f | 30.716667 | 94 | 0.578635 | 3.138045 | false | false | false | false |
scipy/scipy | scipy/signal/_wavelets.py | 2 | 14047 | import numpy as np
from scipy.linalg import eig
from scipy.special import comb
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2*p filter coefficients of the order-`p` Daubechies low-pass
        filter, normalised so that they sum to ``sqrt(2)``.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
    elif p < 35:
        # construct polynomial and factor it
        P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
        yj = np.roots(P)
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
    Return the high-pass quadrature mirror filter (QMF) from a low-pass filter.
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
Returns
-------
array_like
High-pass filter coefficients.
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
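# Hedged sketch (editor addition): for the even-length Daubechies filters above,
# the high-pass filter returned by qmf() is orthogonal to the low-pass input.
def _demo_qmf():  # pragma: no cover
    h = daub(4)
    g = qmf(h)
    assert np.allclose(np.dot(h, g), 0.0)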
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray
        The wavelet function ``psi(x)`` at `x`:
        ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N and
        ``gk = qmf(hk)``.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.empty((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
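# Hedged sketch (editor addition): the Daubechies-2 scaling function evaluated
# on a fine dyadic grid should integrate to roughly 1, as enforced by the
# eigenvector normalisation above.
def _demo_cascade():  # pragma: no cover
    x, phi, psi = cascade(daub(2), J=10)
    dx = x[1] - x[0]
    assert abs(np.sum(phi) * dx - 1.0) < 1e-2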
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
    Note that the energy of the returned wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> M = 100
>>> s = 4.0
>>> w = 2.0
    >>> wavelet = signal.morlet(M, w=w, s=s)
>>> plt.plot(wavelet)
>>> plt.show()
"""
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
output = np.exp(1j * w * x)
if complete:
output -= np.exp(-0.5 * (w**2))
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
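# Hedged sketch (editor addition): with an odd number of points the ricker
# wavelet is symmetric and peaks at the centre with the amplitude A given in
# the docstring.
def _demo_ricker():  # pragma: no cover
    points, a = 101, 4.0
    v = ricker(points, a)
    assert np.isclose(v[points // 2], 2 / (np.sqrt(3 * a) * np.pi**0.25))
    assert np.allclose(v, v[::-1])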
def morlet2(M, s, w=5):
"""
Complex Morlet wavelet, designed to work with `cwt`.
Returns the complete version of morlet wavelet, normalised
according to `s`::
exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
Parameters
----------
M : int
Length of the wavelet.
s : float
Width parameter of the wavelet.
w : float, optional
Omega0. Default is 5
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet : Implementation of Morlet wavelet, incompatible with `cwt`
Notes
-----
.. versionadded:: 1.4.0
This function was designed to work with `cwt`. Because `morlet2`
returns an array of complex numbers, the `dtype` argument of `cwt`
should be set to `complex128` for best results.
Note the difference in implementation with `morlet`.
The fundamental frequency of this wavelet in Hz is given by::
f = w*fs / (2*s*np.pi)
where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
Similarly we can get the wavelet width parameter at ``f``::
s = w*fs / (2*f*np.pi)
Examples
--------
>>> import numpy as np
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> M = 100
>>> s = 4.0
>>> w = 2.0
>>> wavelet = signal.morlet2(M, s, w)
>>> plt.plot(abs(wavelet))
>>> plt.show()
This example shows basic use of `morlet2` with `cwt` in time-frequency
analysis:
>>> t, dt = np.linspace(0, 1, 200, retstep=True)
>>> fs = 1/dt
>>> w = 6.
>>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
>>> freq = np.linspace(1, fs/2, 100)
>>> widths = w*fs / (2*freq*np.pi)
>>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
>>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
>>> plt.show()
"""
x = np.arange(0, M) - (M - 1.0) / 2
x = x / s
wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
output = np.sqrt(1/s) * wavelet
return output
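# Hedged sketch (editor addition): picking s = w*fs / (2*pi*f), as in the
# docstring, places the spectral peak of morlet2 at (approximately) f. The
# numbers below are arbitrary illustrative choices.
def _demo_morlet2_frequency():  # pragma: no cover
    fs, f, w, M = 1000.0, 50.0, 6.0, 1024
    s = w * fs / (2 * f * np.pi)
    wav = morlet2(M, s, w)
    freqs = np.fft.fftfreq(M, d=1 / fs)
    peak = freqs[np.argmax(np.abs(np.fft.fft(wav)))]
    assert abs(peak - f) < fs / M  # within one FFT bin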
def cwt(data, wavelet, widths, dtype=None, **kwargs):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter. The `wavelet` function
is allowed to be complex.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
dtype : data-type, optional
The desired data type of output. Defaults to ``float64`` if the
output of `wavelet` is real and ``complex128`` if it is complex.
.. versionadded:: 1.4.0
kwargs
Keyword arguments passed to wavelet function.
.. versionadded:: 1.4.0
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
.. versionadded:: 1.4.0
For non-symmetric, complex-valued wavelets, the input signal is convolved
with the time-reversed complex-conjugate of the wavelet data [1].
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
**kwargs))[::-1], mode='same')
References
----------
.. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
Academic Press, 2009.
Examples
--------
>>> import numpy as np
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
.. note:: For cwt matrix plotting it is advisable to flip the y-axis
>>> cwtmatr_yflip = np.flipud(cwtmatr)
>>> plt.imshow(cwtmatr_yflip, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
# Determine output type
if dtype is None:
if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
dtype = np.complex128
else:
dtype = np.float64
output = np.empty((len(widths), len(data)), dtype=dtype)
for ind, width in enumerate(widths):
N = np.min([10 * width, len(data)])
wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
output[ind] = convolve(data, wavelet_data, mode='same')
return output
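# Hedged sketch (editor addition): a single-width call to cwt() reproduces the
# convolution spelled out in the Notes section above.
def _demo_cwt_single_row():  # pragma: no cover
    rng = np.random.default_rng(0)
    sig = rng.standard_normal(128)
    width = 5.0
    row = cwt(sig, ricker, [width])[0]
    length = int(min(10 * width, len(sig)))
    direct = convolve(sig, np.conj(ricker(length, width))[::-1], mode='same')
    assert np.allclose(row, direct)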
| bsd-3-clause | 16d9139c934e8101c775a78081129be3 | 27.550813 | 84 | 0.536413 | 3.24561 | false | false | false | false |