repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
chenyyx/scikit-learn-doc-zh | examples/en/cluster/plot_kmeans_assumptions.py | 76 | 2055 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes, and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
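# A hedged follow-up sketch (not part of the original example): the first panel
# only goes wrong because n_clusters=2 was requested for data drawn from three
# blobs; re-fitting with the true number of clusters recovers the intuitive
# grouping, e.g.
#     y_pred_ok = KMeans(n_clusters=3, random_state=random_state).fit_predict(X)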
| gpl-3.0 |
thientu/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e., the classifier performs fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
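# A hedged follow-up sketch (not part of the original example): the gamma with
# the best generalization can be read off the curve as the point where the mean
# cross-validation score peaks, e.g.
#     best_gamma = param_range[np.argmax(test_scores_mean)]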
| bsd-3-clause |
gfyoung/scipy | scipy/stats/_continuous_distns.py | 1 | 210620 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.misc.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy import optimize
from scipy import integrate
from scipy import interpolate
import scipy.special as sc
from scipy._lib._numpy_compat import broadcast_to
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
# In numpy 1.12 and above, np.power refuses to raise integers to negative
# powers, and `np.float_power` is a new replacement.
try:
float_power = np.float_power
except AttributeError:
float_power = np.power
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - sc.smirnov(n, x)
def _ppf(self, q, n):
return sc.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self, x):
return 1.0 - sc.kolmogorov(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return sc.kolmogi(1.0 - q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
r"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is:
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
# norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the normal distribution parameters, so the
`optimizer` argument is ignored.\n\n""")
def fit(self, data, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is:
.. math::
f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
\exp(-\frac{1}{2} (a-1/x)^2)
where :math:`\Phi(a)` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
r"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is:
.. math::
f(x) = \sin(2x + \pi/2) = \cos(2x)
for :math:`-\pi/4 \le x \le \pi/4`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
r"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is:
.. math::
f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
for :math:`0 < x < 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
{\Gamma(a) \Gamma(b)}
for :math:`0 < x < 1`, :math:`a > 0`, :math:`b > 0`, where
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
# gamma(a+b) * x**(a-1) * (1-x)**(b-1)
# beta.pdf(x, a, b) = ------------------------------------
# gamma(a)*gamma(b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
kwds.get('fix_b', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
r"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is:
.. math::
f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
for :math:`x > 0`, :math:`a > 0`, :math:`b > 0`, where
:math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return u1 / u2
def _pdf(self, x, a, b):
# betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{\log(1+c) (1+cx)}
for :math:`0 < x < 1` and :math:`c > 0`.
`bradford` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (k * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
r"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr12 : Burr Type XII distribution
Notes
-----
The probability density function for `burr` is:
.. math::
f(x, c, d) = c d x^{-c-1} (1+x^{-c})^{-d-1}
for :math:`x > 0` and :math:`c, d > 0`.
`burr` takes :math:`c` and :math:`d` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
# burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0))
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
r"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr : Burr Type III distribution
Notes
-----
The probability density function for `burr12` is:
.. math::
f(x, c, d) = c d x^{c-1} (1+x^c)^{-d-1}
for :math:`x > 0` and :math:`c, d > 0`.
`burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
and :math:`d`.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
# burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
r"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is:
.. math::
f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}
for :math:`x > 0` and :math:`c > 0`.
`fisk` takes ``c`` as a shape parameter for :math:`c`.
`fisk` is a special case of `burr` or `burr12` with ``d=1``.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _munp(self, n, c):
return burr_gen._munp(self, n, c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
r"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is
.. math::
f(x) = \frac{1}{\pi (1 + x^2)}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cauchy.pdf(x) = 1 / (pi * (1 + x**2))
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
r"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is:
.. math::
f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
x^{k-1} \exp \left( -x^2/2 \right)
for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation). :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
# x**(df-1) * exp(-x**2/2)
# chi.pdf(x, df) = -------------------------
# 2**(df/2-1) * gamma(df/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
r"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is:
.. math::
f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
x^{k/2-1} \exp \left( -x/2 \right)
for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation).
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
# chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
r"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`dgamma` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is given by
.. math::
f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
for a real number :math:`x` and :math:`c > 0`.
`dweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
# Ignore the optimizer-related keyword arguments, if given.
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
r"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is:
.. math::
f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
\text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
where :math:`x` is a real number and :math:`K > 0`.
It can be thought of as the sum of a standard normal random variable
and an independent exponentially distributed random variable with rate
``1/K``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and
:math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/(\sigma\lambda)`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K):
expval = self._random_state.standard_exponential(self._size) * K
gval = self._random_state.standard_normal(self._size)
return expval + gval
def _pdf(self, x, K):
# exponnorm.pdf(x, K) =
# 1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc(-(x - 1/K) / sqrt(2))
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
class exponweib_gen(rv_continuous):
r"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is:
.. math::
f(x, a, c) = a c (1-\exp(-x^c))^{a-1} \exp(-x^c) x^{c-1}
for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
`exponweib` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
# exponweib.pdf(x, a, c) =
# a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is:
.. math::
f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
for :math:`x > 0` and :math:`c > 0`.
`fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
# fatiguelife.pdf(x, c) =
# (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis in wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
# Alpha. And the latter one, below, passes the tests, while the wiki
# one doesn't. So far I didn't have the guts to actually check the
# coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
r"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is:
.. math::
f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
for :math:`x \ge 0`.
`foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
# foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
r"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is:
.. math::
f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
{(df_2+df_1 x)^{(df_1+df_2)/2}
B(df_1/2, df_2/2)}
for :math:`x > 0`.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
# F.pdf(x, df1, df2) = --------------------------------------------
# (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
f(x, c) = \sqrt{2/\pi} \cosh(c x) \exp(-\frac{x^2+c^2}{2})
for :math:`x \ge 0` and :math:`c \ge 0`.
`foldnorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
# foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# https://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
class weibull_min_gen(rv_continuous):
r"""Weibull minimum continuous random variable.
%(before_notes)s
See Also
--------
weibull_max
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x > 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
class weibull_max_gen(rv_continuous):
r"""Weibull maximum continuous random variable.
%(before_notes)s
See Also
--------
weibull_min
Notes
-----
The probability density function for `weibull_max` is:
.. math::
f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
for :math:`x < 0`, :math:`c > 0`.
`weibull_max` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
# Public methods to be deprecated in frechet_r and frechet_l:
# ['__call__', 'cdf', 'entropy', 'expect', 'fit', 'fit_loc_scale', 'freeze',
# 'interval', 'isf', 'logcdf', 'logpdf', 'logsf', 'mean', 'median', 'moment',
# 'nnlf', 'pdf', 'ppf', 'rvs', 'sf', 'stats', 'std', 'var']
_frechet_r_deprec_msg = """\
The distribution `frechet_r` is a synonym for `weibull_min`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_min`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_r_gen(weibull_min_gen):
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_min_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_min_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_min_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_min_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_min_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_min_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_min_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_min_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_min_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_min_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_min_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_min_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_min_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def median(self, *args, **kwargs):
return weibull_min_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_min_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_min_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_min_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_min_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_min_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_min_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_min_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def std(self, *args, **kwargs):
return weibull_min_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg)
def var(self, *args, **kwargs):
return weibull_min_gen.var(self, *args, **kwargs)
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
_frechet_l_deprec_msg = """\
The distribution `frechet_l` is a synonym for `weibull_max`; this historical
usage is deprecated because of possible confusion with the (quite different)
Frechet distribution. To preserve the existing behavior of the program, use
`scipy.stats.weibull_max`. For the Frechet distribution (i.e. the Type II
extreme value distribution), use `scipy.stats.invweibull`."""
class frechet_l_gen(weibull_max_gen):
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def __call__(self, *args, **kwargs):
return weibull_max_gen.__call__(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def cdf(self, *args, **kwargs):
return weibull_max_gen.cdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def entropy(self, *args, **kwargs):
return weibull_max_gen.entropy(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def expect(self, *args, **kwargs):
return weibull_max_gen.expect(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit(self, *args, **kwargs):
return weibull_max_gen.fit(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def fit_loc_scale(self, *args, **kwargs):
return weibull_max_gen.fit_loc_scale(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def freeze(self, *args, **kwargs):
return weibull_max_gen.freeze(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def interval(self, *args, **kwargs):
return weibull_max_gen.interval(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def isf(self, *args, **kwargs):
return weibull_max_gen.isf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logcdf(self, *args, **kwargs):
return weibull_max_gen.logcdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logpdf(self, *args, **kwargs):
return weibull_max_gen.logpdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def logsf(self, *args, **kwargs):
return weibull_max_gen.logsf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def mean(self, *args, **kwargs):
return weibull_max_gen.mean(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def median(self, *args, **kwargs):
return weibull_max_gen.median(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def moment(self, *args, **kwargs):
return weibull_max_gen.moment(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def nnlf(self, *args, **kwargs):
return weibull_max_gen.nnlf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def pdf(self, *args, **kwargs):
return weibull_max_gen.pdf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def ppf(self, *args, **kwargs):
return weibull_max_gen.ppf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def rvs(self, *args, **kwargs):
return weibull_max_gen.rvs(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def sf(self, *args, **kwargs):
return weibull_max_gen.sf(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def stats(self, *args, **kwargs):
return weibull_max_gen.stats(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def std(self, *args, **kwargs):
return weibull_max_gen.std(self, *args, **kwargs)
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg)
def var(self, *args, **kwargs):
return weibull_max_gen.var(self, *args, **kwargs)
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
class genlogistic_gen(rv_continuous):
r"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is:
.. math::
f(x, c) = c \frac{\exp(-x)}
{(1 + \exp(-x))^{c+1}}
for real :math:`x` and :math:`c > 0`.
`genlogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes ``c`` as a shape parameter for :math:`c`.
For :math:`c=0`, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, 0) = \exp(-x)
For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
.. math::
f(x, -1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
c = np.asarray(c)
self.b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
return True
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
class genexpon_gen(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
r"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is:
.. math::
f(x, c) = \begin{cases}
\exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
\exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
x \le 1/c, c > 0
\end{cases}
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter :math:`c`.
`genextreme` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return np.where(abs(c) == np.inf, 0, 1)
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
# genextreme.pdf(x, c) =
# exp(-exp(-x))*exp(-x), for c==0
# exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = _lazywhere(c >= -1./3,
(c, g1, g2, g3, g2mg12),
lambda c, g1, g2, g3, g2gm12:
np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
fillvalue=np.nan)
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = _lazywhere(c >= -1./4,
(g1, g2, g3, g4, g2mg12),
lambda g1, g2, g3, g4, g2mg12:
(g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
fillvalue=np.nan)
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
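# Illustrative usage sketch (not part of the library): as the docstring notes,
# `genextreme` with c=0 reduces to `gumbel_r`, which is easy to verify:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> x = np.linspace(-2, 5, 7)
#   >>> np.allclose(stats.genextreme.pdf(x, 0), stats.gumbel_r.pdf(x))
#   True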
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
r"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is:
.. math::
f(x, a) = \frac{x^{a-1} \exp(-x)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
gamma function.
`gamma` takes ``a`` as a shape parameter for :math:`a`.
When :math:`a` is an integer, `gamma` reduces to the Erlang
distribution, and when :math:`a=1` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
# gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
        # The skewness of the gamma distribution is `2 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location is fixed by using the argument `floc`, this
function uses explicit formulas or solves a simpler numerical
problem than the full ML optimization problem. So in that case,
the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) +
                #                             np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
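# Illustrative usage sketch (not part of the library): when the location is
# fixed with `floc`, the `fit` method above bypasses the generic optimizer and
# solves the one-dimensional shape equation (or uses closed forms):
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> rng = np.random.RandomState(1234)
#   >>> sample = stats.gamma.rvs(2.5, scale=3.0, size=1000, random_state=rng)
#   >>> a_hat, loc_hat, scale_hat = stats.gamma.fit(sample, floc=0)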
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
        # Override gamma_gen._fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
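# Illustrative usage sketch (not part of the library): per the fit notes above,
# an integer shape can be imposed through ``f0``; the scale is then estimated
# for that fixed shape:
#
#   >>> from scipy import stats
#   >>> sample = stats.erlang.rvs(4, scale=2.0, size=500, random_state=0)
#   >>> k_hat, loc_hat, scale_hat = stats.erlang.fit(sample, f0=4, floc=0)
#   >>> k_hat
#   4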
class gengamma_gen(rv_continuous):
r"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is:
.. math::
f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gengamma` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
# gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
        # Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
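# Illustrative usage sketch (not part of the library): for c=1 the density
# above reduces to the ordinary gamma density, a handy sanity check:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> x = np.linspace(0.1, 10, 10)
#   >>> np.allclose(stats.gengamma.pdf(x, 2.0, 1.0), stats.gamma.pdf(x, 2.0))
#   True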
class genhalflogistic_gen(rv_continuous):
r"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is:
.. math::
        f(x, c) = \frac{2 (1 - c x)^{1/c - 1}}{[1 + (1 - c x)^{1/c}]^2}
for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
`genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return c > 0
def _pdf(self, x, c):
# genhalflogistic.pdf(x, c) =
# 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
r"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is:
.. math::
f(x, c) = c \exp(x) \exp(-c (e^x-1))
for :math:`x \ge 0`, :math:`c > 0`.
`gompertz` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
r"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is:
.. math::
f(x) = \exp(-(x + e^{-x}))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_r.pdf(x) = exp(-(x + exp(-x)))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# https://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
r"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is:
.. math::
f(x) = \frac{2}{\pi (1 + x^2)}
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
= \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
r"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is:
.. math::
f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
for :math:`x > 0`.
`halfnorm` is a special case of `chi` with ``df=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
# halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi),
1-2.0/np.pi,
np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
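# Illustrative usage sketch (not part of the library): as stated in the
# docstring, `halfnorm` coincides with `chi` for ``df=1``:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> x = np.linspace(0.01, 4, 8)
#   >>> np.allclose(stats.halfnorm.pdf(x), stats.chi.pdf(x, 1))
#   True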
class hypsecant_gen(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
f(x) = \frac{1}{\pi} \text{sech}(x)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
r"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is:
.. math::
f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and
:math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
:math:`F[2, 1]` is the Gauss hypergeometric function
`scipy.special.hyp2f1`.
`gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
# gausshyper.pdf(x, a, b, c, z) =
# C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
r"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is:
.. math::
f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
for :math:`x > 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`invgamma` takes ``a`` as a shape parameter for :math:`a`.
`invgamma` is a special case of `gengamma` with ``c=-1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
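# Illustrative usage sketch (not part of the library): the `_cdf` above is
# ``gammaincc(a, 1/x)``, i.e. if X ~ invgamma(a) then 1/X ~ gamma(a), so the
# cdf should match the gamma survival function evaluated at 1/x:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> x, a = np.linspace(0.5, 5, 10), 3.0
#   >>> np.allclose(stats.invgamma.cdf(x, a), stats.gamma.sf(1.0/x, a))
#   True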
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
r"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
\exp(-\frac{(x-\mu)^2}{2 x \mu^2})
for :math:`x > 0` and :math:`\mu > 0`.
`invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
When :math:`\mu` is too small, evaluating the cumulative distribution
function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for :math:`\mu \le 0.0028`.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
# invgauss.pdf(x, mu) =
# 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class norminvgauss_gen(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
f(x, a, b) = (a \exp(\sqrt{a^2 - b^2} + b x)) /
(\pi \sqrt{1 + x^2} \, K_1(a \sqrt{1 + x^2}))
where `x` is a real number, the parameter `a` is the tail heaviness
and `b` is the asymmetry parameter satisfying `a > 0` and `abs(b) <= a`.
:math:`K_1` is the modified Bessel function of second kind
(`scipy.special.k1`).
%(after_notes)s
A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
can be expressed as a normal mean-variance mixture:
`Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
`invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
to generate random variates.
References
----------
O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and Stochastic
Volatility Modelling", Scandinavian Journal of Statistics, Vol. 24,
pp. 1–13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi * np.exp(gamma)
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
def _rvs(self, a, b):
        # note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is a
        # standard normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
sz, rndm = self._size, self._random_state
ig = invgauss.rvs(mu=1/gamma, size=sz, random_state=rndm)
return b * ig + np.sqrt(ig) * norm.rvs(size=sz, random_state=rndm)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
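# Illustrative usage sketch (not part of the library): the normal mean-variance
# mixture representation quoted in the docstring (and used by `_rvs` above) can
# be written out directly; the variable names below are arbitrary:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> a, b = 2.0, 0.5
#   >>> rng = np.random.RandomState(0)
#   >>> v = stats.invgauss.rvs(mu=1/np.sqrt(a**2 - b**2), size=10000,
#   ...                        random_state=rng)
#   >>> y = b*v + np.sqrt(v)*rng.standard_normal(10000)  # ~ norminvgauss(a, b)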
class invweibull_gen(rv_continuous):
r"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \exp(-x^{-c})
for :math:`x > 0`, :math:`c > 0`.
`invweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
for :math:`0 < x < 1` and :math:`a, b > 0`, and :math:`\phi` is the normal
pdf.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
r"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is:
.. math::
f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
\phi(a + b \log(x + \sqrt{x^2 + 1}))
    for all real :math:`x` and :math:`a, b > 0`, where :math:`\phi` is the normal pdf.
`johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
# phi(a + b * log(x + sqrt(x**2 + 1)))
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
r"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)
for :math:`x > 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
r"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is:
.. math::
f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}
for :math:`x < 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=-1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
The distribution for `levy_stable` has characteristic function:
.. math::
\varphi(t, \alpha, \beta, c, \mu) =
e^{it\mu -|ct|^{\alpha}(1-i\beta \operatorname{sign}(t)\Phi(\alpha, t))}
where:
.. math::
\Phi = \begin{cases}
\tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |t|&\alpha =1
\end{cases}
The probability density function for `levy_stable` is:
.. math::
f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
where :math:`-\infty < t < \infty`. This integral does not have a known closed form.
    For pdf evaluation we use either the Zolotarev :math:`S_0` parameterization with integration,
    direct integration of the standard parameterization of the characteristic function, or an FFT
    of the characteristic function. If ``levy_stable.pdf_fft_min_points_threshold`` is set to a
    value other than None (its default) and the number of points exceeds that threshold, the FFT
    method is used; otherwise one of the other methods is used.
The default method is 'best' which uses Zolotarev's method if alpha = 1 and integration of
characteristic function otherwise. The default method can be changed by setting
``levy_stable.pdf_default_method`` to either 'zolotarev', 'quadrature' or 'best'.
To increase accuracy of FFT calculation one can specify ``levy_stable.pdf_fft_grid_spacing``
(defaults to 0.001) and ``pdf_fft_n_points_two_power`` (defaults to a value that covers the
input range * 4). Setting ``pdf_fft_n_points_two_power`` to 16 should be sufficiently accurate
in most cases at the expense of CPU time.
    For cdf evaluation we use either the Zolotarev :math:`S_0` parameterization with integration or
    the integral of a spline interpolated through the FFT-computed pdf. The settings affecting the
    FFT calculation are the same as for the pdf calculation. Setting the threshold to ``None``
    (default) will disable FFT. For cdf calculations the Zolotarev method is superior in accuracy,
    so FFT is disabled by default.
Fitting estimate uses quantile estimation method in [MC]. MLE estimation of parameters in
fit method uses this quantile estimate initially. Note that MLE doesn't always converge if
using FFT for pdf calculations; so it's best that ``pdf_fft_min_points_threshold`` is left unset.
    .. warning::
        For pdf calculations the Zolotarev implementation is unstable for values where alpha = 1
        and beta != 0. In this case the quadrature method is recommended. FFT calculation is also
        considered experimental.
        For cdf calculations the FFT calculation is considered experimental. Use the Zolotarev
        method instead (the default).
%(after_notes)s
References
----------
    .. [MC] McCulloch, J., 1986. Simple consistent estimators of stable distribution parameters.
       Communications in Statistics - Simulation and Computation 15, 1109-1136.
.. [MS] Mittnik, S.T. Rachev, T. Doganoglu, D. Chenyao, 1999. Maximum likelihood estimation
of stable Paretian models, Mathematical and Computer Modelling, Volume 29, Issue 10,
1999, Pages 275-293.
.. [BS] Borak, S., Hardle, W., Rafal, W. 2005. Stable distributions, Economic Risk.
%(example)s
"""
def _rvs(self, alpha, beta):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
sz = self._size
alpha = broadcast_to(alpha, sz)
beta = broadcast_to(beta, sz)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
random_state=self._random_state)
W = expon.rvs(size=sz, random_state=self._random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
@staticmethod
def _cf(t, alpha, beta):
Phi = lambda alpha, t: np.tan(np.pi*alpha/2) if alpha != 1 else -2.0*np.log(np.abs(t))/np.pi
return np.exp(-(np.abs(t)**alpha)*(1-1j*beta*np.sign(t)*Phi(alpha, t)))
@staticmethod
def _pdf_from_cf_with_fft(cf, h=0.01, q=9):
"""Calculates pdf from cf using fft. Using region around 0 with N=2**q points
separated by distance h. As suggested by [MS].
"""
N = 2**q
n = np.arange(1,N+1)
density = ((-1)**(n-1-N/2))*np.fft.fft(((-1)**(n-1))*cf(2*np.pi*(n-1-N/2)/h/N))/h/N
x = (n-1-N/2)*h
return (x, density)
@staticmethod
def _pdf_single_value_best(x, alpha, beta):
if alpha != 1. or (alpha == 1. and beta == 0.):
return levy_stable_gen._pdf_single_value_zolotarev(x, alpha, beta)
else:
return levy_stable_gen._pdf_single_value_cf_integrate(x, alpha, beta)
@staticmethod
def _pdf_single_value_cf_integrate(x, alpha, beta):
cf = lambda t: levy_stable_gen._cf(t, alpha, beta)
return integrate.quad(lambda t: np.real(np.exp(-1j*t*x)*cf(t)), -np.inf, np.inf, limit=1000)[0]/np.pi/2
@staticmethod
def _pdf_single_value_zolotarev(x, alpha, beta):
"""Calculate pdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
def g(theta):
return V(theta)*np.real(np.complex(x0-zeta)**(alpha/(alpha-1)))
def f(theta):
return g(theta) * np.exp(-g(theta))
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
return 0.
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-xi, np.pi/2])
intg_kwargs = {}
# windows quadpack less forgiving with points out of bounds
if intg_max.success and not np.isnan(intg_max.fun)\
and intg_max.x > -xi and intg_max.x < np.pi/2:
intg_kwargs["points"] = [intg_max.x]
intg = integrate.quad(f, -xi, np.pi/2, **intg_kwargs)[0]
return alpha * intg / np.pi / np.abs(alpha-1) / (x0-zeta)
elif x0 == zeta:
return sc.gamma(1+1/alpha)*np.cos(xi)/np.pi/((1+zeta**2)**(1/alpha/2))
else:
return levy_stable_gen._pdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta != 0:
warnings.warn('Density calculation unstable for alpha=1 and beta!=0.' +
' Use quadrature method instead.', RuntimeWarning)
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
def g(theta):
return np.exp(-np.pi * x / 2. / beta) * V(theta)
def f(theta):
return g(theta) * np.exp(-g(theta))
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-np.pi/2, np.pi/2])
intg = integrate.fixed_quad(f, -np.pi/2, intg_max.x)[0] + integrate.fixed_quad(f, intg_max.x, np.pi/2)[0]
return intg / np.abs(beta) / 2.
else:
return 1/(1+x**2)/np.pi
@staticmethod
def _cdf_single_value_zolotarev(x, alpha, beta):
"""Calculate cdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
c_1 = 1 if alpha > 1 else .5 - xi/np.pi
def f(theta):
return np.exp(-V(theta)*np.real(np.complex(x0-zeta)**(alpha/(alpha-1))))
with np.errstate(all="ignore"):
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
intg = 0
else:
intg = integrate.quad(f, -xi, np.pi/2)[0]
return c_1 + np.sign(1-alpha) * intg / np.pi
elif x0 == zeta:
return .5 - xi/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta > 0:
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
with np.errstate(all="ignore"):
expr_1 = np.exp(-np.pi*x/beta/2.)
int_1 = integrate.quad(lambda theta: np.exp(-expr_1 * V(theta)), -np.pi/2, np.pi/2)[0]
return int_1 / np.pi
elif beta == 0:
return .5 + np.arctan(x)/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, 1, -beta)
def _pdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
pdf_default_method_name = getattr(self, 'pdf_default_method', 'best')
if pdf_default_method_name == 'best':
pdf_single_value_method = levy_stable_gen._pdf_single_value_best
elif pdf_default_method_name == 'zolotarev':
pdf_single_value_method = levy_stable_gen._pdf_single_value_zolotarev
else:
pdf_single_value_method = levy_stable_gen._pdf_single_value_cf_integrate
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack({tuple(row) for row in data_in[:,1:]})
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([pdf_single_value_method(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
                warnings.warn('Density calculations experimental for FFT method.' +
                              ' Use combination of Zolotarev and quadrature methods instead.', RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = np.ceil(np.log(2*np.max(np.abs(_x))/h)/np.log(2)) + 2 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.interp1d(density_x, np.real(density))
data_out[data_mask] = f(_x)
return data_out.T[0]
def _cdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack({tuple(row) for row in data_in[:,1:]})
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([levy_stable._cdf_single_value_zolotarev(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
                warnings.warn('Cumulative density calculations experimental for FFT method.' +
                              ' Use Zolotarev method instead.', RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = 16 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.InterpolatedUnivariateSpline(density_x, np.real(density))
data_out[data_mask] = np.array([f.integral(self.a, x_1) for x_1 in _x]).reshape(data_out[data_mask].shape)
return data_out.T[0]
def _fitstart(self, data):
        # We follow McCulloch's 1986 method - Simple Consistent Estimators
# of Stable Distribution Parameters
# Table III and IV
nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4, 5, 6, 8, 10, 15, 25]
nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
# table III - alpha = psi_1(nu_alpha, nu_beta)
alpha_table = [
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
[1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
[1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
[1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
[1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
[1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
[1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
[1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
[1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
[1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
[0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
[0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
[0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
[0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]
# table IV - beta = psi_2(nu_alpha, nu_beta)
beta_table = [
[0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
[0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
[0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
[0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
[0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
[0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
[0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
[0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
[0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
[0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
[0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
[0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
[0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
[0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
[0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]
# Table V and VII
alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, 0.9, 0.8, 0.7, 0.6, 0.5]
beta_range = [0, 0.25, 0.5, 0.75, 1]
# Table V - nu_c = psi_3(alpha, beta)
nu_c_table = [
[1.908, 1.908, 1.908, 1.908, 1.908],
[1.914, 1.915, 1.916, 1.918, 1.921],
[1.921, 1.922, 1.927, 1.936, 1.947],
[1.927, 1.930, 1.943, 1.961, 1.987],
[1.933, 1.940, 1.962, 1.997, 2.043],
[1.939, 1.952, 1.988, 2.045, 2.116],
[1.946, 1.967, 2.022, 2.106, 2.211],
[1.955, 1.984, 2.067, 2.188, 2.333],
[1.965, 2.007, 2.125, 2.294, 2.491],
[1.980, 2.040, 2.205, 2.435, 2.696],
[2.000, 2.085, 2.311, 2.624, 2.973],
[2.040, 2.149, 2.461, 2.886, 3.356],
[2.098, 2.244, 2.676, 3.265, 3.912],
[2.189, 2.392, 3.004, 3.844, 4.775],
[2.337, 2.634, 3.542, 4.808, 6.247],
[2.588, 3.073, 4.534, 6.636, 9.144]]
# Table VII - nu_zeta = psi_5(alpha, beta)
nu_zeta_table = [
[0, 0.000, 0.000, 0.000, 0.000],
[0, -0.017, -0.032, -0.049, -0.064],
[0, -0.030, -0.061, -0.092, -0.123],
[0, -0.043, -0.088, -0.132, -0.179],
[0, -0.056, -0.111, -0.170, -0.232],
[0, -0.066, -0.134, -0.206, -0.283],
[0, -0.075, -0.154, -0.241, -0.335],
[0, -0.084, -0.173, -0.276, -0.390],
[0, -0.090, -0.192, -0.310, -0.447],
[0, -0.095, -0.208, -0.346, -0.508],
[0, -0.098, -0.223, -0.380, -0.576],
[0, -0.099, -0.237, -0.424, -0.652],
[0, -0.096, -0.250, -0.469, -0.742],
[0, -0.089, -0.262, -0.520, -0.853],
[0, -0.078, -0.272, -0.581, -0.997],
[0, -0.061, -0.279, -0.659, -1.198]]
psi_1 = interpolate.interp2d(nu_beta_range, nu_alpha_range, alpha_table, kind='linear')
psi_2 = interpolate.interp2d(nu_beta_range, nu_alpha_range, beta_table, kind='linear')
psi_2_1 = lambda nu_beta, nu_alpha: psi_2(nu_beta, nu_alpha) if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
phi_3 = interpolate.interp2d(beta_range, alpha_range, nu_c_table, kind='linear')
phi_3_1 = lambda beta, alpha: phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
phi_5 = interpolate.interp2d(beta_range, alpha_range, nu_zeta_table, kind='linear')
phi_5_1 = lambda beta, alpha: phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
# quantiles
p05 = np.percentile(data, 5)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p25 = np.percentile(data, 25)
p75 = np.percentile(data, 75)
nu_alpha = (p95 - p05)/(p75 - p25)
nu_beta = (p95 + p05 - 2*p50)/(p95 - p05)
if nu_alpha >= 2.439:
alpha = np.clip(psi_1(nu_beta, nu_alpha)[0], np.finfo(float).eps, 2.)
beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0], -1., 1.)
else:
alpha = 2.0
beta = np.sign(nu_beta)
c = (p75 - p25) / phi_3_1(beta, alpha)[0]
zeta = p50 + c*phi_5_1(beta, alpha)[0]
delta = np.clip(zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha == 1. else zeta, np.finfo(float).eps, np.inf)
return (alpha, beta, delta, c)
def _stats(self, alpha, beta):
mu = 0 if alpha > 1 else np.nan
mu2 = 2 if alpha == 2 else np.inf
g1 = 0. if alpha == 2. else np.NaN
g2 = 0. if alpha == 2. else np.NaN
return mu, mu2, g1, g2
levy_stable = levy_stable_gen(name='levy_stable')
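# Illustrative usage sketch (not part of the library): the pdf/cdf method
# selection described in the docstring is driven by instance attributes read
# via ``getattr`` in `_pdf`/`_cdf`, for example:
#
#   >>> from scipy import stats
#   >>> stats.levy_stable.pdf_default_method = 'quadrature'
#   >>> stats.levy_stable.pdf_fft_min_points_threshold = None  # keep FFT off
#   >>> p = stats.levy_stable.pdf(0.5, 1.5, 0.0)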
class logistic_gen(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
{(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# https://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
r"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is:
.. math::
f(x, c) = \frac{\exp(c x - \exp(x))}
{\Gamma(c)}
    for all real :math:`x` and :math:`c > 0`. Here, :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`loggamma` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
return np.log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
# loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for :math:`c > 0`.
`loglaplace` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
r"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is:
.. math::
f(x, s) = \frac{1}{s x \sqrt{2\pi}}
\exp\left(-\frac{\log^2(x)}{2s^2}\right)
for :math:`x > 0`, :math:`s > 0`.
`lognorm` takes ``s`` as a shape parameter for :math:`s`.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s):
return np.exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
# lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location parameter is fixed by using the `floc` argument,
this function uses explicit formulas for the maximum likelihood
estimation of the log-normal shape and scale parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(lognorm_gen, self).fit(data, *args, **kwds)
f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
kwds.get('fix_s', None))
fscale = kwds.get('fscale', None)
if len(args) > 1:
raise TypeError("Too many input arguments.")
for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
'optimizer']:
kwds.pop(name, None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# Special case: loc is fixed. Use the maximum likelihood formulas
# instead of the numerical solver.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
floc = float(floc)
if floc != 0:
# Shifting the data by floc. Don't do the subtraction in-place,
# because `data` might be a view of the input array.
data = data - floc
if np.any(data <= 0):
raise FitDataError("lognorm", lower=floc, upper=np.inf)
lndata = np.log(data)
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free.
scale = np.exp(lndata.mean())
if f0 is None:
# shape is free.
shape = lndata.std()
else:
# shape is fixed.
shape = float(f0)
else:
# scale is fixed, shape is free
scale = float(fscale)
shape = np.sqrt(((lndata - np.log(scale))**2).mean())
return shape, floc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
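# Illustrative usage sketch (not part of the library): the (mu, sigma)
# parametrization described in the docstring maps to ``s=sigma`` and
# ``scale=np.exp(mu)``; with ``floc=0`` the fit above uses the explicit
# log-data formulas:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> mu, sigma = 1.0, 0.5
#   >>> rng = np.random.RandomState(0)
#   >>> y = np.exp(rng.normal(mu, sigma, size=10000))
#   >>> s_hat, loc_hat, scale_hat = stats.lognorm.fit(y, floc=0)
#   >>> # s_hat is close to sigma, scale_hat is close to np.exp(mu)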
class gilbrat_gen(rv_continuous):
r"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is:
.. math::
f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
`gilbrat` is a special case of `lognorm` with ``s=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return np.exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
# gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
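# Illustrative usage sketch (not part of the library): `gilbrat` is `lognorm`
# with ``s=1``, which can be checked directly:
#
#   >>> import numpy as np
#   >>> from scipy import stats
#   >>> x = np.linspace(0.1, 5, 6)
#   >>> np.allclose(stats.gilbrat.pdf(x), stats.lognorm.pdf(x, 1.0))
#   True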
class maxwell_gen(rv_continuous):
r"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is:
.. math::
f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
for :math:`x > 0`.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
# maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
r"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is:
.. math::
f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
for :math:`x > 0` and :math:`k, s > 0`.
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, k, s):
# mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
https://digitalcommons.lsu.edu/gradschool_dissertations/3672
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
    Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
https://doi.org/10.4236/jwarp.2012.410101
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
self.a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
self.b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return h == h
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
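# Illustrative sketch of one row of the table above: for h = 1 the kappa4
# density should coincide with the generalized Pareto density of shape -k.
# Assumes `numpy` and `scipy.stats` are importable; k = 0.5 is an arbitrary
# example value and x is restricted to the common support (0, 1/k).
def _sketch_kappa4_h1_matches_genpareto():
    import numpy as np
    from scipy.stats import genpareto, kappa4
    k = 0.5
    x = np.linspace(0.05, 1.0 / k - 0.05, 20)
    assert np.allclose(kappa4.pdf(x, 1.0, k), genpareto.pdf(x, -k))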
class kappa3_gen(rv_continuous):
r"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa3` is:
.. math::
f(x, a) = a (a + x^a)^{-(a + 1)/a}
for :math:`x > 0` and :math:`a > 0`.
`kappa3` takes ``a`` as a shape parameter for :math:`a`.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Monthly Weather Review, vol. 101,
701-707, (September, 1973),
https://doi.org/10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012), https://doi.org/10.4236/ojs.2012.24050
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
# kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
r"""A Moyal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `moyal` is:
.. math::
f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
for a real number :math:`x`.
%(after_notes)s
This distribution has utility in high-energy physics and radiation
detection. It describes the energy loss of a charged relativistic
particle due to ionization of the medium [1]_. It also provides an
approximation for the Landau distribution. For an in depth description
see [2]_. For additional description, see [3]_.
References
----------
.. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
The London, Edinburgh, and Dublin Philosophical Magazine
and Journal of Science, vol 46, 263-280, (1955).
:doi:`10.1080/14786440308521076` (gated)
.. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
International Journal of Research and Reviews in Applied Sciences,
vol 10, 171-192, (2012).
http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
.. [3] C. Walck, "Handbook on Statistical Distributions for
Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
University of Stockholm: Stockholm, Sweden, (2007).
http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
.. versionadded:: 1.1.0
%(example)s
"""
def _rvs(self):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a=0.5, scale=2, size=sz, random_state=rndm)
return -np.log(u1)
def _pdf(self, x):
return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
def _cdf(self, x):
return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
def _sf(self, x):
return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
def _ppf(self, x):
return -np.log(2 * sc.erfcinv(x)**2)
def _stats(self):
mu = np.log(2) + np.euler_gamma
mu2 = np.pi**2 / 2
g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
g2 = 4.
return mu, mu2, g1, g2
def _munp(self, n):
if n == 1.0:
return np.log(2) + np.euler_gamma
elif n == 2.0:
return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
elif n == 3.0:
tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
tmp2 = (np.log(2)+np.euler_gamma)**3
tmp3 = 14 * sc.zeta(3)
return tmp1 + tmp2 + tmp3
elif n == 4.0:
tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
tmp3 = (np.log(2) + np.euler_gamma)**4
tmp4 = 7 * np.pi**4 / 4
return tmp1 + tmp2 + tmp3 + tmp4
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n)
moyal = moyal_gen(name="moyal")
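# Illustrative sketch: the `moyal` cdf implemented above has the closed form
# erfc(exp(-x/2)/sqrt(2)), which a short numerical check can confirm.
# Assumes `numpy`, `scipy.special` and a scipy version that already ships
# `moyal` (1.1.0 or later, per the versionadded note above).
def _sketch_moyal_cdf_closed_form():
    import numpy as np
    from scipy.special import erfc
    from scipy.stats import moyal
    x = np.linspace(-3.0, 6.0, 30)
    assert np.allclose(moyal.cdf(x), erfc(np.exp(-0.5 * x) / np.sqrt(2)))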
class nakagami_gen(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
for :math:`x > 0`, :math:`\nu > 0`.
`nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
(x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x})
for :math:`x > 0` and :math:`k, \lambda > 0`. :math:`k` specifies the
degrees of freedom (denoted ``df`` in the implementation) and
:math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the
implementation). :math:`I_\nu` denotes the modified Bessel function of
the first kind of degree :math:`\nu` (`scipy.special.iv`).
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
return _ncx2_log_pdf(x, df, nc)
def _pdf(self, x, df, nc):
# ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
# * I[(df-2)/2](sqrt(nc*x))
return _ncx2_pdf(x, df, nc)
def _cdf(self, x, df, nc):
return _ncx2_cdf(x, df, nc)
def _ppf(self, q, df, nc):
return sc.chndtrix(q, df, nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
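# Illustrative sketch: the _stats method above implies mean = df + nc and
# variance = 2*(df + 2*nc) for `ncx2`.  Assumes `numpy` and `scipy.stats`
# are importable; df=3, nc=2 are arbitrary example values.
def _sketch_ncx2_moments():
    import numpy as np
    from scipy.stats import ncx2
    df, nc = 3.0, 2.0
    mean, var = ncx2.stats(df, nc, moments='mv')
    assert np.isclose(mean, df + nc)
    assert np.isclose(var, 2.0 * (df + 2.0 * nc))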
class ncf_gen(rv_continuous):
r"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is:
.. math::
f(x, n_1, n_2, \lambda) =
\exp(\frac{\lambda}{2} + \lambda n_1 \frac{x}{2(n_1 x+n_2)})
n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
(n_2+n_1 x)^{-(n_1+n_2)/2}
\gamma(n_1/2) \gamma(1+n_2/2) \\
\frac{L^{\frac{v_1}{2}-1}_{v_2/2}
(-\lambda v_1 \frac{x}{2(v_1 x+v_2)})}
{B(v_1/2, v_2/2) \gamma(\frac{v_1+v_2}{2})}
for :math:`n_1 > 1`, :math:`n_2, \lambda > 0`. Here :math:`n_1` is the
degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
the denominator, :math:`\lambda` the non-centrality parameter,
:math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
generalized Laguerre polynomial and :math:`B` is the beta function.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
# ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
# df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
# (df2+df1*x)**(-(df1+df2)/2) *
# gamma(df1/2)*gamma(1+df2/2) *
# L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
# (B(v1/2, v2/2) * gamma((v1+v2)/2))
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
r"""A Student's t continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is:
.. math::
f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
{\sqrt{\pi \nu} \Gamma(\nu)}
(1+x^2/\nu)^{-(\nu+1)/2}
where :math:`x` is a real number and the degrees of freedom parameter
:math:`\nu` (denoted ``df`` in the implementation) satisfies
:math:`\nu > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
%(after_notes)s
%(example)s
"""
def _argcheck(self, df):
return df > 0
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
# gamma((df+1)/2)
# t.pdf(x, df) = ---------------------------------------------------
# sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu = np.where(df > 1, 0.0, np.inf)
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
mu2 = np.where(df <= 1, np.nan, mu2)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.inf)
g2 = np.where(df <= 2, np.nan, g2)
return mu, mu2, g1, g2
t = t_gen(name='t')
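# Illustrative sketch: Student's t is symmetric about zero, so the survival
# function at x equals the cdf at -x, which is exactly how _sf and _isf are
# written above.  Assumes `numpy` and `scipy.stats` are importable; df=5 is
# an arbitrary example value.
def _sketch_t_symmetry():
    import numpy as np
    from scipy.stats import t
    x = np.linspace(-4.0, 4.0, 17)
    assert np.allclose(t.sf(x, 5), t.cdf(-x, 5))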
class nct_gen(rv_continuous):
r"""A non-central Student's t continuous random variable.
%(before_notes)s
Notes
-----
If :math:`Y` is a standard normal random variable and :math:`V` is
an independent chi-square random variable (`chi2`) with :math:`k` degrees
of freedom, then
.. math::
X = \frac{Y + c}{\sqrt{V/k}}
has a non-central Student's t distribution on the real line.
The degrees of freedom parameter :math:`k` (denoted ``df`` in the
implementation) satisfies :math:`k > 0` and the noncentrality parameter
:math:`c` (denoted ``nc`` in the implementation) is a real number.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
# Biometrika 48, p. 465 (1961).
# e.g. https://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
# kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
r"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is:
.. math::
f(x, b) = \frac{b}{x^{b+1}}
for :math:`x \ge 1`, :math:`b > 0`.
`pareto` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
# pareto.pdf(x, b) = b / x**(b+1)
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _sf(self, x, b):
return x**(-b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes ``c`` as a shape parameter for :math:`c`.
`lomax` is a special case of `pareto` with ``loc=-1.0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
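# Illustrative sketch: per the note above, `lomax` is `pareto` shifted by
# loc=-1, so lomax.pdf(x, c) should agree with pareto.pdf(x, c, loc=-1).
# Assumes `numpy` and `scipy.stats` are importable; c=2.5 is an arbitrary
# example value.
def _sketch_lomax_is_shifted_pareto():
    import numpy as np
    from scipy.stats import lomax, pareto
    c = 2.5
    x = np.linspace(0.0, 10.0, 25)
    assert np.allclose(lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0))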
class pearson3_gen(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, skew) = \frac{|\beta|}{\Gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1}
\exp(-\beta (x - \zeta))
where:
.. math::
\beta = \frac{2}{skew \cdot stddev}
\alpha = (stddev \cdot \beta)^2
\zeta = loc - \frac{\alpha}{\beta}
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`pearson3` takes ``skew`` as a shape parameter for :math:`skew`.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
# - sc.gammaln(alpha) - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
skew = broadcast_to(skew, self._size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = self._random_state.standard_normal(nsmall)
ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
zeta)
if self._size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
r"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is:
.. math::
f(x, a) = a x^{a-1}
for :math:`0 \le x \le 1`, :math:`a > 0`.
`powerlaw` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b=1``.
%(example)s
"""
def _pdf(self, x, a):
# powerlaw.pdf(x, a) = a * x**(a-1)
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
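# Illustrative sketch: per the note above, `powerlaw` is `beta` with its
# second shape fixed to 1, so the two densities should coincide on [0, 1].
# Assumes `numpy` and `scipy.stats` are importable; a=2.5 is an arbitrary
# example value.
def _sketch_powerlaw_is_beta_with_b1():
    import numpy as np
    from scipy.stats import beta, powerlaw
    a = 2.5
    x = np.linspace(0.0, 1.0, 21)
    assert np.allclose(powerlaw.pdf(x, a), beta.pdf(x, a, 1.0))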
class powerlognorm_gen(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
# powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
# (Phi(-log(x)/s))**(c-1),
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
r"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is:
.. math::
f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`c > 0`.
`powernorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
r"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is:
.. math::
f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
for :math:`-1 \le x \le 1`, :math:`c > 0`.
`rdist` takes ``c`` as a shape parameter for :math:`c`.
This distribution includes the following distribution kernels as
special cases::
c = 2: uniform
c = 4: Epanechnikov (parabolic)
c = 6: quartic (biweight)
c = 8: triweight
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / sc.beta(0.5, c / 2.0)
res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if np.any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
r"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is:
.. math::
f(x) = x \exp(-x^2/2)
for :math:`x \ge 0`.
`rayleigh` is a special case of `chi` with ``df=2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
# rayleigh.pdf(r) = r * exp(-r**2/2)
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
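# Illustrative sketch: per the note above, `rayleigh` is `chi` with df=2,
# and its density has the closed form x*exp(-x**2/2).  Assumes `numpy` and
# `scipy.stats` are importable.
def _sketch_rayleigh_special_case():
    import numpy as np
    from scipy.stats import chi, rayleigh
    x = np.linspace(0.01, 4.0, 25)
    assert np.allclose(rayleigh.pdf(x), chi.pdf(x, 2))
    assert np.allclose(rayleigh.pdf(x), x * np.exp(-x * x / 2.0))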
class reciprocal_gen(rv_continuous):
r"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is:
.. math::
f(x, a, b) = \frac{1}{x \log(b/a)}
for :math:`a \le x \le b`, :math:`b > a > 0`.
`reciprocal` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = np.log(b*1.0 / a)
return (a > 0) & (b > a)
def _pdf(self, x, a, b):
# reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(self.d)
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
r"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is:
.. math::
f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
for :math:`x > 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
function of order zero (`scipy.special.i0`).
`rice` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
The Rice distribution describes the length, :math:`r`, of a 2-D vector with
components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
v` are independent Gaussian random variables with standard deviation
:math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# https://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
self._size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
#
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
r"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is:
.. math::
f(x) = \frac{2}{\pi} \sqrt{1-x^2}
for :math:`-1 \le x \le 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
return 2.0/np.pi*np.sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
`skewnorm` takes a real number :math:`a` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution
(`norm`). `rvs` implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
http://azzalini.stat.unipd.it/SN/faq-r.html
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _cdf_single(self, x, *args):
if x <= 0:
cdf = integrate.quad(self._pdf, self.a, x, args=args)[0]
else:
t1 = integrate.quad(self._pdf, self.a, 0, args=args)[0]
t2 = integrate.quad(self._pdf, 0, x, args=args)[0]
cdf = t1 + t2
if cdf > 1:
# Presumably numerical noise, e.g. 1.0000000000000002
cdf = 1.0
return cdf
def _sf(self, x, a):
return self._cdf(-x, -a)
def _rvs(self, a):
u0 = self._random_state.normal(size=self._size)
v = self._random_state.normal(size=self._size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
class trapz_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
`trapz` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1], with the constant (flat) part of the density running from :math:`c` to :math:`d`.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
class triang_gen(rv_continuous):
r"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc + scale)``.
`triang` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
# 0: edge case where c=0
# 1: generalised case for x < c, don't use x <= c, as it doesn't cope
# with c = 0.
# 2: generalised case for x >= c, but doesn't cope with c = 1
# 3: edge case where c=1
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2 - 2 * x,
lambda x, c: 2 * x / c,
lambda x, c: 2 * (1 - x) / (1 - c),
lambda x, c: 2 * x],
(x, c))
return r
def _cdf(self, x, c):
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2*x - x*x,
lambda x, c: x * x / c,
lambda x, c: (x*x - 2*x + c) / (c-1),
lambda x, c: x * x],
(x, c))
return r
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
r"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is:
.. math::
f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
for :math:`0 < x < b`.
`truncexpon` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
self.b = b
return b > 0
def _pdf(self, x, b):
# truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
r"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
self._delta = np.where(self.a > 0,
-(self._sb - self._sa),
self._nb - self._na)
self._logdelta = np.log(self._delta)
return a < b
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
# XXX Use _lazywhere...
ppf = np.where(self.a > 0,
_norm_isf(q*self._sb + self._sa*(1.0-q)),
_norm_ppf(q*self._nb + self._na*(1.0-q)))
return ppf
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
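# Illustrative sketch of the clip-value conversion described in the docstring
# above: standardize the desired clip points with the target mean and standard
# deviation, then pass them as the shape parameters a and b.  Assumes `numpy`
# and `scipy.stats` are importable; the clip values, mean and std are
# arbitrary example numbers.
def _sketch_truncnorm_clip_conversion():
    import numpy as np
    from scipy.stats import truncnorm
    myclip_a, myclip_b, my_mean, my_std = 0.0, 10.0, 5.0, 2.0
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    # All of the probability mass lies inside [myclip_a, myclip_b].
    assert np.isclose(truncnorm.cdf(myclip_a, a, b, loc=my_mean, scale=my_std), 0.0)
    assert np.isclose(truncnorm.cdf(myclip_b, a, b, loc=my_mean, scale=my_std), 1.0)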
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (:math:`\lambda = -1`)
- logistic (:math:`\lambda = 0`)
- approx Normal (:math:`\lambda = 0.14`)
- uniform from -1 to 1 (:math:`\lambda = 1`)
`tukeylambda` takes a real number :math:`\lambda` (denoted ``lam``
in the implementation) as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class FitUniformFixedScaleDataError(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
"data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." %
(ptp, fscale),
)
class uniform_gen(rv_continuous):
r"""A uniform continuous random variable.
In the standard form, the distribution is uniform on ``[0, 1]``. Using
the parameters ``loc`` and ``scale``, one obtains the uniform distribution
on ``[loc, loc + scale]``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
def fit(self, data, *args, **kwds):
"""
Maximum likelihood estimate for the location and scale parameters.
`uniform.fit` uses only the following parameters. Because exact
formulas are used, the parameters related to optimization that are
available in the `fit` method of other distributions are ignored
here. The only positional argument accepted is `data`.
Parameters
----------
data : array_like
Data to use in calculating the maximum likelihood estimate.
floc : float, optional
Hold the location parameter fixed to the specified value.
fscale : float, optional
Hold the scale parameter fixed to the specified value.
Returns
-------
loc, scale : float
Maximum likelihood estimates for the location and scale.
Notes
-----
An error is raised if `floc` is given and any values in `data` are
less than `floc`, or if `fscale` is given and `fscale` is less
than ``data.max() - data.min()``. An error is also raised if both
`floc` and `fscale` are given.
Examples
--------
>>> from scipy.stats import uniform
We'll fit the uniform distribution to `x`:
>>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
For a uniform distribution MLE, the location is the minimum of the
data, and the scale is the maximum minus the minimum.
>>> loc, scale = uniform.fit(x)
>>> loc
2.0
>>> scale
11.0
If we know the data comes from a uniform distribution where the support
starts at 0, we can use `floc=0`:
>>> loc, scale = uniform.fit(x, floc=0)
>>> loc
0.0
>>> scale
13.0
Alternatively, if we know the length of the support is 12, we can use
`fscale=12`:
>>> loc, scale = uniform.fit(x, fscale=12)
>>> loc
1.5
>>> scale
12.0
In that last example, the support interval is [1.5, 13.5]. This
solution is not unique. For example, the distribution with ``loc=2``
and ``scale=12`` has the same likelihood as the one above. When
`fscale` is given and it is larger than ``data.max() - data.min()``,
the parameters returned by the `fit` method center the support over
the interval ``[data.min(), data.max()]``.
"""
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
# Ignore the optimizer-related keyword arguments, if given.
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
# MLE for the uniform distribution
# --------------------------------
# The PDF is
#
# f(x, loc, scale) = {1/scale for loc <= x <= loc + scale
# {0 otherwise}
#
# The likelihood function is
# L(x, loc, scale) = (1/scale)**n
# where n is len(x), assuming loc <= x <= loc + scale for all x.
# The log-likelihood is
# l(x, loc, scale) = -n*log(scale)
# The log-likelihood is maximized by making scale as small as possible,
# while keeping loc <= x <= loc + scale. So if neither loc nor scale
# are fixed, the log-likelihood is maximized by choosing
# loc = x.min()
# scale = x.ptp()
# If loc is fixed, it must be less than or equal to x.min(), and then
# the scale is
# scale = x.max() - loc
# If scale is fixed, it must not be less than x.ptp(). If scale is
# greater than x.ptp(), the solution is not unique. Note that the
# likelihood does not depend on loc, except for the requirement that
# loc <= x <= loc + scale. All choices of loc for which
# x.max() - scale <= loc <= x.min()
# have the same log-likelihood. In this case, we choose loc such that
# the support is centered over the interval [data.min(), data.max()]:
# loc = x.min() - 0.5*(scale - x.ptp())
if fscale is None:
# scale is not fixed.
if floc is None:
# loc is not fixed, scale is not fixed.
loc = data.min()
scale = data.ptp()
else:
# loc is fixed, scale is not fixed.
loc = floc
scale = data.max() - loc
if data.min() < loc:
raise FitDataError("uniform", lower=loc, upper=loc + scale)
else:
# loc is not fixed, scale is fixed.
ptp = data.ptp()
if ptp > fscale:
raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
# If ptp < fscale, the ML estimate is not unique; see the comments
# above. We choose the distribution for which the support is
# centered over the interval [data.min(), data.max()].
loc = data.min() - 0.5*(fscale - ptp)
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `vonmises` and `vonmises_line` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
modified Bessel function of order zero (`scipy.special.i0`).
`vonmises` is a circular distribution which does not restrict the
distribution to a fixed interval. Currently, there is no circular
distribution framework in scipy. The ``cdf`` is implemented such that
``cdf(x + 2*np.pi) == cdf(x) + 1``.
`vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
on the real line. This is a regular (i.e. non-circular) distribution.
`vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
# vonmises.pdf(x, \kappa) = exp(\kappa * cos(x)) / (2*pi*I[0](\kappa))
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
r"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
for :math:`x > 0`.
`wald` is a special case of `invgauss` with ``mu=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
# wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
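# Illustrative sketch: per the note above, `wald` is `invgauss` with mu=1,
# which is also how _pdf, _logpdf and _cdf delegate in the class.  Assumes
# `numpy` and `scipy.stats` are importable.
def _sketch_wald_is_invgauss_with_mu1():
    import numpy as np
    from scipy.stats import invgauss, wald
    x = np.linspace(0.05, 5.0, 25)
    assert np.allclose(wald.pdf(x), invgauss.pdf(x, 1.0))
    assert np.allclose(wald.cdf(x), invgauss.cdf(x, 1.0))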
class wrapcauchy_gen(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
r"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_:
.. math::
f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta)
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to a Laplace distribution.
For :math:`\beta = 2`, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
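# Illustrative sketch of the two special cases noted above: beta=1 recovers
# the Laplace density and beta=2 recovers a normal density with scale
# 1/sqrt(2).  Assumes `numpy` and `scipy.stats` are importable.
def _sketch_gennorm_special_cases():
    import numpy as np
    from scipy.stats import gennorm, laplace, norm
    x = np.linspace(-3.0, 3.0, 25)
    assert np.allclose(gennorm.pdf(x, 1.0), laplace.pdf(x))
    assert np.allclose(gennorm.pdf(x, 2.0), norm.pdf(x, scale=1.0 / np.sqrt(2.0)))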
class halfgennorm_gen(rv_continuous):
r"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is:
.. math::
f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
for :math:`x > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`halfgennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to an exponential distribution.
For :math:`\beta = 2`, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
# beta
# halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
# gamma(1/beta)
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
class crystalball_gen(rv_continuous):
r"""
Crystalball distribution
%(before_notes)s
Notes
-----
The probability density function for `crystalball` is:
.. math::
f(x, \beta, m) = \begin{cases}
N \exp(-x^2 / 2), &\text{for } x > -\beta\\
N A (B - x)^{-m} &\text{for } x \le -\beta
\end{cases}
where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
:math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
`crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
parameters. :math:`\beta` defines the point where the pdf changes
from a power-law to a Gaussian distribution. :math:`m` is the power
of the power-law tail.
References
----------
.. [1] "Crystal Ball Function",
https://en.wikipedia.org/wiki/Crystal_Ball_function
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, beta, m):
"""
Return PDF of the crystalball function.
--
| exp(-x**2 / 2), for x > -beta
crystalball.pdf(x, beta, m) = N * |
| A * (B - x)**(-m), for x <= -beta
--
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return np.exp(-x**2 / 2)
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _logpdf(self, x, beta, m):
"""
Return the log of the PDF of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return -x**2/2
def lhs(x, beta, m):
return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _cdf(self, x, beta, m):
"""
Return CDF of the crystalball function
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
_norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m+1) / (m-1))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _ppf(self, p, beta, m):
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
def ppf_less(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return (m/beta - beta -
((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
def ppf_greater(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
def _munp(self, n, beta, m):
"""
Returns the n-th non-central moment of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def n_th_moment(n, beta, m):
"""
Returns n-th moment. Defined only if n+1 < m
Function cannot broadcast due to the loop over n
"""
A = (m/beta)**m * np.exp(-beta**2 / 2.0)
B = m/beta - beta
rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
(1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
lhs = np.zeros(rhs.shape)
for k in range(n + 1):
lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
(m/beta)**(-m + k + 1))
return A * lhs + rhs
return N * _lazywhere(n + 1 < m, (n, beta, m),
np.vectorize(n_th_moment, otypes=[float]),
np.inf)
def _argcheck(self, beta, m):
"""
Shape parameter bounds are m > 1 and beta > 0.
"""
return (m > 1) & (beta > 0)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
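# Illustrative sketch: the piecewise density above is constructed so that the
# Gaussian core and the power-law tail meet continuously at x = -beta, which a
# small numerical check can confirm.  Assumes `numpy` and a scipy version that
# ships `crystalball` (0.19.0 or later, per the versionadded note above);
# beta=2, m=3 are arbitrary example values.
def _sketch_crystalball_continuity_at_minus_beta():
    import numpy as np
    from scipy.stats import crystalball
    beta, m = 2.0, 3.0
    eps = 1e-8
    tail = crystalball.pdf(-beta - eps, beta, m)    # power-law branch
    core = crystalball.pdf(-beta + eps, beta, m)    # Gaussian branch
    assert np.isclose(tail, core, rtol=1e-5)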
def _argus_phi(chi):
"""
Utility function for the argus distribution
used in the CDF and normalization of the Argus function
"""
return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(-\chi^2 (1 - x^2)/2)
for :math:`0 < x < 1`, where
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
`argus` takes :math:`\chi` as a shape parameter.
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, chi):
"""
Return PDF of the argus function
argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x *
sqrt(1-x**2) * exp(- 0.5 * chi**2 * (1 - x**2))
"""
y = 1.0 - x**2
return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
def _cdf(self, x, chi):
"""
Return CDF of the argus function
"""
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
"""
Return survival function of the argus function
"""
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
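# Illustrative sketch: a sanity check that the `argus` density above
# integrates to one over its support (0, 1) and that the cdf reaches 0 and 1
# at the endpoints.  Assumes `numpy`, `scipy.integrate` and a scipy version
# that ships `argus` (0.19.0 or later, per the versionadded note above);
# chi=2.5 is an arbitrary example value.
def _sketch_argus_normalization():
    import numpy as np
    from scipy.integrate import quad
    from scipy.stats import argus
    chi = 2.5
    total, _ = quad(lambda x: argus.pdf(x, chi), 0.0, 1.0)
    assert np.isclose(total, 1.0)
    assert np.isclose(argus.cdf(0.0, chi), 0.0)
    assert np.isclose(argus.cdf(1.0, chi), 1.0)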
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
data sample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
data sample.
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
In particular, the return value of np.histogram is accepted.
Notes
-----
There are no additional shape parameters except for the loc and scale.
The pdf is defined as a stepwise function from the provided histogram
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> plt.title("PDF from Template")
>>> plt.hist(data, density=True, bins=100)
>>> plt.plot(X, hist_dist.pdf(X), label='PDF')
>>> plt.plot(X, hist_dist.cdf(X), label='CDF')
>>> plt.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
            In particular, the return value of np.histogram is accepted.
"""
self._histogram = histogram
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self._hbins[0]
kwargs['b'] = self._hbins[-1]
super(rv_histogram, self).__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super(rv_histogram, self)._updated_ctor_param()
dct['histogram'] = self._histogram
return dct
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
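# --- Illustrative sketch (not part of the original scipy source) -------------
# The first moment computed by rv_histogram._munp above reduces to
# sum(p_i * (b_{i+1}**2 - b_i**2) / 2) over the normalized bin densities p_i.
# A minimal numeric cross-check on hypothetical data; the helper name is an
# illustration only, not scipy API.
def _demo_rv_histogram_first_moment():
    import numpy as np
    rng = np.random.RandomState(1234)
    counts, edges = np.histogram(rng.normal(size=1000), bins=20)
    dist = rv_histogram((counts, edges))
    widths = np.diff(edges)
    dens = counts / np.sum(counts * widths)
    manual = np.sum(dens * (edges[1:]**2 - edges[:-1]**2) / 2.0)
    return np.isclose(dist.moment(1), manual)  # expected True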
| bsd-3-clause |
alimuldal/numpy | numpy/core/tests/test_multiarray.py | 6 | 246246 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
import gc
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
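# Illustrative sketch (an addition, not part of the original test module): on
# Python >= 3.3 a 0-d buffer reports empty tuples for shape and strides, which
# is exactly what the EMPTY constant above encodes.
def _demo_zero_dim_buffer_metadata():
    import numpy as np
    m = memoryview(np.array(0))
    return m.shape, m.strides  # () and () on Python 3.3+, i.e. EMPTY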
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # sizes that are not a power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
# New in 1.12: This behavior changes in 1.13, test for dep warning
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
with assert_warns(FutureWarning):
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
        # prevent mistakes such as gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, a.prod)
self.assertRaises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
c.sort(kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
        # Test that out-of-range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
        # and previously did not bounds-check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
            # median-of-3 killer sequence: O(n^2) for a pure median-of-3 pivot
            # quickselect; exercises the median-of-medians-of-5 fallback that
            # keeps the overall cost O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                for row in d1:
                    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
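# Illustrative sketch, not part of the upstream test suite: a compact summary
# of the partition contract exercised at length above.  The helper name is
# arbitrary (underscored so the test runner does not collect it) and it reuses
# the module-level ``np`` import.
def _partition_sketch():
    a = np.array([7, 1, 5, 3, 9])
    kth = 2
    p = np.partition(a, kth)
    # p[kth] is the value a full sort would put there ...
    assert p[kth] == np.sort(a)[kth]
    # ... and everything before/after kth is <= / >= that value.
    assert (p[:kth] <= p[kth]).all() and (p[kth + 1:] >= p[kth]).all()
    # argpartition returns indices; indexing with them yields a valid partition.
    q = a[np.argpartition(a, kth)]
    assert q[kth] == np.sort(a)[kth]
    assert (q[:kth] <= q[kth]).all() and (q[kth + 1:] >= q[kth]).all()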
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
        #    d = a.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original should not be changed by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # Scanning whether the refcount == 1 object is on the Python stack, in
        # order to check that we were called directly from Python, is flawed:
        # the object may still be above the stack pointer and we have no
        # access to the top of it.
#
        # def incref_elide_l(l):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original should not be changed by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
            # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github bug 4753)
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
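# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the plain ``out`` behaviour that TestBinop.test_out_override
# re-implements through __numpy_ufunc__ -- a ufunc writes its result into the
# supplied array and returns that same array.
def _ufunc_out_sketch():
    a = np.zeros(3)
    b = np.array([1.0, 2.0, 3.0])
    res = np.multiply(b, 2.0, out=a)  # result lands in ``a``
    assert res is a
    assert_array_equal(a, [2.0, 4.0, 6.0])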
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
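# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the zero-rank indexing rules TestSubscripting checks above -- an
# integer index yields a scalar, while a trailing Ellipsis yields a 0-d array.
def _zero_rank_indexing_sketch():
    x = np.array([1, 2, 3])
    assert isinstance(x[0], np.int_)      # plain integer index -> scalar
    assert type(x[0, ...]) is np.ndarray  # extra Ellipsis -> 0-d ndarray
    assert x[0, ...].ndim == 0 and x[0, ...] == 1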
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
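# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the round trip behind TestPickling.test_roundtrip --
# ndarray.dumps() is just the standard pickle protocol, so plain
# pickle.dumps/loads round-trips arrays as well.
def _pickle_roundtrip_sketch():
    import pickle
    a = np.array([[2, 9], [7, 0], [3, 8]])
    b = pickle.loads(pickle.dumps(a, protocol=2))
    assert_array_equal(a, b)
    assert a.dtype == b.dtype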
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
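# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the boolean-mask behaviour TestFancyIndexing checks above -- a
# mask selects the True positions, and assigning through it touches only those.
def _boolean_mask_sketch():
    x = np.array([1, 2, 3, 4])
    m = np.array([False, True, False, True])
    assert_array_equal(x[m], [2, 4])    # selection
    x[m] = 0                            # masked assignment
    assert_array_equal(x, [1, 0, 3, 0])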
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous ones used to be allowed too)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
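# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the convention encoded in the nan_arr table above -- argmax
# propagates NaNs, so the index of the first NaN is returned, while
# np.nanargmax ignores them.
def _argmax_nan_sketch():
    a = np.array([0.0, 1.0, np.nan, 3.0])
    assert np.argmax(a) == 2     # position of the first NaN
    assert np.nanargmax(a) == 3  # largest non-NaN value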
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous ones used to be allowed too)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
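# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the clip behaviour TestClip checks above -- either bound may be
# given on its own, and NaNs pass through unclipped.
def _clip_sketch():
    v = np.array([-2.0, 0.5, np.nan, 3.0])
    assert_array_equal(v.clip(min=0.0), [0.0, 0.5, np.nan, 3.0])   # lower bound only
    assert_array_equal(v.clip(max=1.0), [-2.0, 0.5, np.nan, 1.0])  # upper bound only
    assert_array_equal(v.clip(0.0, 1.0), [0.0, 0.5, np.nan, 1.0])  # both bounds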
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
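# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the behaviour TestCompress checks above -- np.compress keeps the
# slices along ``axis`` where the condition is True, truncates a short
# condition, and works on the flattened array when no axis is given.
def _compress_sketch():
    arr = np.arange(10).reshape(2, 5)
    assert_array_equal(np.compress([0, 1], arr, axis=0), [[5, 6, 7, 8, 9]])
    assert_array_equal(np.compress([0, 1], arr, axis=1), [[1], [6]])
    assert_array_equal(np.compress([0, 1], arr), [1])  # flattened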
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
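# Illustrative sketch, not part of the upstream test suite (helper name is
# arbitrary): the semantics TestPutmask relies on -- np.putmask writes values
# where the mask is True, and a shorter ``values`` array is repeated to the
# length of ``a`` (indexed by flat position, not by the count of True entries).
def _putmask_sketch():
    x = np.arange(6)
    np.putmask(x, x > 2, [-1, -2])
    # values repeat to [-1, -2, -1, -2, -1, -2]; positions 3, 4, 5 are True
    assert_array_equal(x, [0, 1, 2, -2, -1, -2])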
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
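# np.lexsort: multi-key sorting for numeric, datetime/timedelta, and object keys, plus axis validation.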
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(ValueError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str imbues less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
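# np.frombuffer round-trips through tobytes() in both byte orders.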
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
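# The .flat iterator: writeability propagation and UPDATEIFCOPY behavior of its __array__().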
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
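# In-place ndarray.resize: reference checks, shape arguments, and zero-initialization of appended memory.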
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
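# Structured (record) arrays: field renaming, duplicate/bytes/unicode field names, multi-field indexing, and hashing.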
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
            # Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
with suppress_warnings() as sup:
sup.filter(FutureWarning,
"Assignment between structured arrays.*")
sup.filter(FutureWarning,
"Numpy has detected that you .*")
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
(2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
(3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
(2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
# For <=1.12 a is not modified, but it will be in 1.13
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
# make sure views of a multi-field index warn too
c = np.zeros(3, dtype='i8,i8,i8')
assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
[FutureWarning])
# make sure assignment using a different dtype warns
a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
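# ndarray.view: reinterpreting a structured byte array as a little-endian integer dtype.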
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
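# Thin wrappers around the ndarray methods so TestStats can loop over mean/var/std uniformly.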
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
        # this needs definition as there are lots of places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
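# np.vdot over real, complex, boolean, and object dtypes, including non-contiguous inputs.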
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
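# np.dot: matrix/vector combinations, the three-argument (out) form, memory order, and the Accelerate sgemv alignment workaround.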
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
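# np.inner: type mismatches, scalar operands, contiguity variants, and higher-dimensional tensors.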
class TestInner(TestCase):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself used to segfault or give a
        # meaningless result.
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
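# str()/repr() of large arrays are summarized with an ellipsis.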
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
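# np.alen on arrays, nested lists, and scalars.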
class TestAlen(TestCase):
def test_basic(self):
m = np.array([1, 2, 3])
self.assertEqual(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
self.assertEqual(np.alen(m), 2)
m = [1, 2, 3]
self.assertEqual(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
self.assertEqual(np.alen(m), 2)
def test_singleton(self):
self.assertEqual(np.alen(5), 1)
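# np.choose with and without broadcasting of the choice arrays.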
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
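# np.repeat with per-element counts, scalar counts, and explicit axes.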
class TestRepeat(TestCase):
def setUp(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
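# Padding modes understood by the C-level neighborhood iterator test helpers used below.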
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
        # Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
        # Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
        # Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
        # Stacking circular on top of zero, first neighborhood strictly inside the
        # array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
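# Assigning complex values into a real array raises np.ComplexWarning (here promoted to an error).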
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
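# np.min_scalar_type: the smallest dtype that can hold a given Python integer.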
class TestMinScalarType(object):
    def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
    def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
    def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
    def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
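# Mapping PEP 3118 buffer format strings to numpy dtypes via _dtype_from_pep3118.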
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046, should not segfault, should raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cacher in ctors.c
a = np.zeros(1000)
del a
gc.collect()
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ishanic/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on new, non-corrupt data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in direction X and y, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n to non-corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
gaamy/pyMuse | pymuse/viz.py | 1 | 6228 | __author__ = 'benjamindeleener'
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from datetime import datetime, timedelta
from numpy import linspace
def timeTicks(x, pos):
d = timedelta(milliseconds=x)
return str(d)
class MuseViewer(object):
def __init__(self, acquisition_freq, signal_boundaries=None):
self.refresh_freq = 0.4 # default=0.15
self.acquisition_freq = acquisition_freq
self.init_time = datetime.now()
self.last_refresh = datetime.now()
if signal_boundaries is not None:
self.low, self.high = signal_boundaries[0], signal_boundaries[1]
else:
self.low, self.high = 0, 1
class MuseViewerSignal(MuseViewer):
def __init__(self, signal, acquisition_freq, signal_boundaries=None):
super(MuseViewerSignal, self).__init__(acquisition_freq, signal_boundaries)
        self.signal = signal
        # x-axis sample indices; needed by the FFT plots below (assumed to span
        # signal.length, which the time-domain refresh path also relies on)
        self.x_data = range(0, self.signal.length, 1)
self.figure, (self.ax1, self.ax2, self.ax3, self.ax4) = plt.subplots(4, 1, sharex=True, figsize=(15, 10))
self.ax1.set_title('Left ear')
self.ax2.set_title('Left forehead')
self.ax3.set_title('Right forehead')
self.ax4.set_title('Right ear')
if self.signal.do_fft:
self.ax1_plot, = self.ax1.plot(self.x_data[0:len(self.x_data)/2], self.signal.l_ear_fft[0:len(self.x_data)/2])
self.ax2_plot, = self.ax2.plot(self.x_data[0:len(self.x_data)/2], self.signal.l_forehead_fft[0:len(self.x_data)/2])
self.ax3_plot, = self.ax3.plot(self.x_data[0:len(self.x_data)/2], self.signal.r_forehead_fft[0:len(self.x_data)/2])
self.ax4_plot, = self.ax4.plot(self.x_data[0:len(self.x_data)/2], self.signal.r_ear_fft[0:len(self.x_data)/2])
self.ax1.set_ylim([0, 10000])
self.ax2.set_ylim([0, 10000])
self.ax3.set_ylim([0, 10000])
self.ax4.set_ylim([0, 10000])
else:
self.ax1_plot, = self.ax1.plot(self.signal.time, self.signal.l_ear)
self.ax2_plot, = self.ax2.plot(self.signal.time, self.signal.l_forehead)
self.ax3_plot, = self.ax3.plot(self.signal.time, self.signal.r_forehead)
self.ax4_plot, = self.ax4.plot(self.signal.time, self.signal.r_ear)
self.ax1.set_ylim([self.low, self.high])
self.ax2.set_ylim([self.low, self.high])
self.ax3.set_ylim([self.low, self.high])
self.ax4.set_ylim([self.low, self.high])
formatter = mticker.FuncFormatter(timeTicks)
self.ax1.xaxis.set_major_formatter(formatter)
self.ax2.xaxis.set_major_formatter(formatter)
self.ax3.xaxis.set_major_formatter(formatter)
self.ax4.xaxis.set_major_formatter(formatter)
plt.ion()
def show(self):
plt.show(block=False)
self.refresh()
def refresh(self):
time_now = datetime.now()
if (time_now - self.last_refresh).total_seconds() > self.refresh_freq:
self.last_refresh = time_now
pass
else:
return
if self.signal.do_fft:
self.ax1_plot.set_ydata(self.signal.l_ear_fft[0:len(self.x_data)/2])
self.ax2_plot.set_ydata(self.signal.l_forehead_fft[0:len(self.x_data)/2])
self.ax3_plot.set_ydata(self.signal.r_forehead_fft[0:len(self.x_data)/2])
self.ax4_plot.set_ydata(self.signal.r_ear_fft[0:len(self.x_data)/2])
else:
self.ax1_plot.set_ydata(self.signal.l_ear)
self.ax2_plot.set_ydata(self.signal.l_forehead)
self.ax3_plot.set_ydata(self.signal.r_forehead)
self.ax4_plot.set_ydata(self.signal.r_ear)
times = list(linspace(self.signal.time[0], self.signal.time[-1], self.signal.length))
self.ax1_plot.set_xdata(times)
self.ax2_plot.set_xdata(times)
self.ax3_plot.set_xdata(times)
self.ax4_plot.set_xdata(times)
self.ax1.set_xlim(self.signal.time[0], self.signal.time[-1])
self.figure.canvas.draw()
self.figure.canvas.flush_events()
class MuseViewerConcentrationMellow(object):
def __init__(self, signal_concentration, signal_mellow, signal_boundaries=None):
self.refresh_freq = 0.05
self.init_time = 0.0
self.last_refresh = datetime.now()
self.signal_concentration = signal_concentration
self.signal_mellow = signal_mellow
if signal_boundaries is not None:
self.low, self.high = signal_boundaries[0], signal_boundaries[1]
else:
self.low, self.high = 0, 1
self.x_data_concentration = range(0, self.signal_concentration.length, 1)
self.x_data_mellow = range(0, self.signal_mellow.length, 1)
self.figure, (self.ax1, self.ax2) = plt.subplots(2, 1, sharex=True, figsize=(15, 10))
self.ax1.set_title('Concentration')
self.ax2.set_title('Mellow')
self.ax1_plot, = self.ax1.plot(self.x_data_concentration, self.signal_concentration.concentration)
self.ax2_plot, = self.ax2.plot(self.x_data_mellow, self.signal_mellow.mellow)
self.ax1.set_ylim([self.low, self.high])
self.ax2.set_ylim([self.low, self.high])
formatter = mticker.FuncFormatter(timeTicks)
self.ax1.xaxis.set_major_formatter(formatter)
self.ax2.xaxis.set_major_formatter(formatter)
plt.ion()
def show(self):
plt.show(block=False)
self.refresh()
def refresh(self):
time_now = datetime.now()
if (time_now - self.last_refresh).total_seconds() > self.refresh_freq:
self.last_refresh = time_now
pass
else:
return
self.ax1_plot.set_ydata(self.signal_concentration.concentration)
self.ax2_plot.set_ydata(self.signal_mellow.mellow)
times = list(linspace(self.signal_concentration.time[0], self.signal_concentration.time[-1], self.signal_concentration.length))
self.ax1_plot.set_xdata(times)
self.ax2_plot.set_xdata(times)
plt.xlim(self.signal_concentration.time[0], self.signal_concentration.time[-1])
self.figure.canvas.draw()
self.figure.canvas.flush_events()
| mit |
ischwabacher/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST.
    The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
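For orientation, a minimal `conf.py` sketch combining several of these options
might look as follows (the values shown simply restate the defaults described
above; none of them are required)::
    extensions = ['IPython.sphinxext.ipython_console_highlighting',
                  'IPython.sphinxext.ipython_directive']
    ipython_savefig_dir = None       # falls back to html_static_path
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np',
                         'import matplotlib.pyplot as plt']
    ipython_holdcount = True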
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition.
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
            # default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
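# A minimal conf.py sketch (illustrative only, not part of this module): the
# option names below are the config values registered above; the concrete
# values and the extension path are assumptions and depend on where this
# module is importable from in a given project.
#
#     extensions = ['IPython.sphinxext.ipython_directive']
#     ipython_savefig_dir = 'generated/figures'
#     ipython_mplbackend = 'agg'
#     ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']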
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip the first example, which depends on a local data file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
bhargav/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
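# Illustrative sketch (not part of the benchmark): what the helpers above
# yield for a tiny document. Assumes this module's tokens/token_freqs are in
# scope; output ordering is made deterministic with sorted().
#
#     >>> list(tokens("Hello, hello world"))
#     ['hello', 'hello', 'world']
#     >>> sorted(token_freqs("Hello, hello world").items())
#     [('hello', 2), ('world', 1)]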
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
larsmans/numpy | numpy/lib/twodim_base.py | 37 | 26758 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
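# Illustrative usage sketch for tril_indices_from (an addition, not part of
# the original docstring); assumes ``import numpy as np``:
#
#     >>> a = np.arange(6).reshape(2, 3)
#     >>> tril_indices_from(a)
#     (array([0, 1, 1]), array([0, 0, 1]))
#     >>> a[tril_indices_from(a)]
#     array([0, 3, 4])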
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
jeffkinnison/awe-wq | awe/voronoi.py | 2 | 3930 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Voronoi diagram from a list of points
# Copyright (C) 2011 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def circumcircle(P1, P2, P3):
'''
Return center of the circle containing P1, P2 and P3
If P1, P2 and P3 are colinear, return None
Adapted from:
http://local.wasp.uwa.edu.au/~pbourke/geometry/circlefrom3/Circle.cpp
'''
delta_a = P2 - P1
delta_b = P3 - P2
if np.abs(delta_a[0]) <= 0.000000001 and np.abs(delta_b[1]) <= 0.000000001:
center_x = 0.5*(P2[0] + P3[0])
center_y = 0.5*(P1[1] + P2[1])
else:
aSlope = delta_a[1]/delta_a[0]
bSlope = delta_b[1]/delta_b[0]
if np.abs(aSlope-bSlope) <= 0.000000001:
return None
center_x = (aSlope*bSlope*(P1[1] - P3[1]) + bSlope*(P1[0] + P2[0]) \
- aSlope*(P2[0]+P3[0]))/(2.*(bSlope-aSlope))
center_y = -(center_x - (P1[0]+P2[0])/2.)/aSlope + (P1[1]+P2[1])/2.
return center_x, center_y
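# Illustrative sketch (not part of the original script): for a right triangle
# whose legs are axis-aligned, the circumcenter is the midpoint of the
# hypotenuse. Assumes numpy arrays as inputs, as used by voronoi() below.
#
#     >>> import numpy as np
#     >>> circumcircle(np.array([0., 0.]), np.array([0., 2.]), np.array([2., 2.]))
#     (1.0, 1.0)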
def voronoi(X,Y):
''' Return line segments describing the voronoi diagram of X and Y '''
P = np.zeros((X.size+4,2))
P[:X.size,0], P[:Y.size,1] = X, Y
# We add four points at (pseudo) "infinity"
m = max(np.abs(X).max(), np.abs(Y).max())*1e5
P[X.size:,0] = -m, -m, +m, +m
P[Y.size:,1] = -m, +m, -m, +m
D = matplotlib.tri.Triangulation(P[:,0],P[:,1])
T = D.triangles
#axes = plt.subplot(1,1,1)
#plt.scatter(X,Y, s=5)
#patches = []
#for i,triang in enumerate(T):
# polygon = Polygon(np.array([P[triang[0]],P[triang[1]],P[triang[2]]]))
# patches.append(polygon)
#colors = 100*np.random.rand(len(patches))
#p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
#p.set_array(np.array(colors))
#axes.add_collection(p)
#plt.colorbar(p)
##lines = matplotlib.collections.LineCollection(segments, color='0.75')
##axes.add_collection(lines)
#plt.axis([0,1,0,1])
#plt.show()
#plt.savefig('test0.png')
n = T.shape[0]
C = np.zeros((n,2))
for i in range(n):
C[i] = circumcircle(P[T[i,0]],P[T[i,1]],P[T[i,2]])
X,Y = C[:,0], C[:,1]
#segments = []
#for i in range(n):
# for k in D.neighbors[i]:
# if k != -1:
# segments.append([(X[i],Y[i]), (X[k],Y[k])])
cells = [[] for i in range(np.max(T)+1)]
for i,triang in enumerate(T):
cells[triang[0]].append([X[i],Y[i]])
cells[triang[1]].append([X[i],Y[i]])
cells[triang[2]].append([X[i],Y[i]])
for i,cell in enumerate(cells):
angle = []
for coord in cell:
angle.append(np.arctan2((coord[1]-P[i,1]),(coord[0]-P[i,0])))
id = np.argsort(-np.array(angle))
cells[i] = np.array([cell[j] for j in id])
return cells
# -----------------------------------------------------------------------------
if __name__ == '__main__':
P = np.random.random((2,256))
X,Y = P[0],P[1]
fig = plt.figure(figsize=(10,10))
axes = plt.subplot(1,1,1)
plt.scatter(X,Y, s=5)
#segments = voronoi(X,Y)
cells = voronoi(X,Y)
patches = []
for cell in cells:
polygon = Polygon(cell,True)
patches.append(polygon)
#plt.scatter(cell[:,0],cell[:,1])
colors = 100*np.random.rand(len(patches))
print colors
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4)
p.set_array(np.array(colors))
axes.add_collection(p)
plt.colorbar(p)
#lines = matplotlib.collections.LineCollection(segments, color='0.75')
#axes.add_collection(lines)
plt.axis([0,1,0,1])
plt.show()
plt.savefig('test.png')
| gpl-2.0 |
yufengg/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
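# Illustrative usage sketch (an assumption, not part of the original module);
# the DataFrame contents and column names are made up:
#
#     import pandas as pd
#     x = pd.DataFrame({'feature': [0.1, 0.2, 0.3]})
#     y = pd.Series([0, 1, 0])
#     input_fn = pandas_input_fn(x, y, batch_size=2, shuffle=False, num_epochs=1)
#     # input_fn is a callable; an Estimator invokes it to obtain the
#     # (features, target) tensors during training or evaluation.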
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
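# Illustrative sketch of extract_pandas_data (an addition, not from the
# original module); int and bool columns are cast to float, and the exact
# array repr depends on the numpy/pandas versions in use:
#
#     >>> import pandas as pd
#     >>> extract_pandas_data(pd.DataFrame({'a': [1, 2], 'b': [True, False]}))
#     array([[ 1.,  1.],
#            [ 2.,  0.]])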
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
BeiLuoShiMen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_cairo.py | 69 | 16706 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
* varables lowerUpper
* functions underscore_separated
"""
from __future__ import division
import os, sys, warnings, gzip
import numpy as npy
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that pycairo is installed.")
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
from matplotlib import rcParams
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.dpi = dpi
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
def set_ctx_from_surface (self, surface):
self.ctx = cairo.Context (surface)
self.ctx.save() # restore, save - when call new_gc()
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha*fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
#@staticmethod
def convert_path(ctx, tpath):
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
if len(path.vertices) > 18980:
raise ValueError("The Cairo backend can not draw paths longer than 18980 points.")
ctx = gc.ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
tpath = transform.transform_path(path)
ctx.new_path()
self.convert_path(ctx, tpath)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
# bbox - not currently used
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
im.flipud_out()
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
# function does not pass a 'gc' so use renderer.ctx
ctx = self.ctx
y = self.height - y - rows
ctx.set_source_surface (surface, x, y)
ctx.paint()
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * npy.pi / 180)
ctx.set_font_size (size)
ctx.show_text (s.encode("utf-8"))
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * npy.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
ctx.show_text(s.encode("utf-8"))
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self.ctx.restore() # matches save() in set_ctx_from_surface()
self.ctx.save()
return GraphicsContextCairo (renderer=self)
def points_to_pixels(self, points):
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.ctx = renderer.ctx
def set_alpha(self, alpha):
self._alpha = alpha
rgb = self._rgb
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], alpha)
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
self._cliprect = rectangle
if rectangle is None:
return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
# Alternative: just set _cliprect here and actually set cairo clip rect
# in fill_and_stroke() inside ctx.save() ... ctx.restore()
def set_clip_path(self, path):
if path is not None:
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
tpath = affine.transform_path(tpath)
RendererCairo.convert_path(ctx, tpath)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
if dashes == None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash (
self.renderer.points_to_pixels (npy.asarray(dashes)), offset)
def set_foreground(self, fg, isRGB=None):
GraphicsContextBase.set_foreground(self, fg, isRGB)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = w
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
if _debug: print '%s()' % _fn_name()  # module-level function: no self available here
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasCairo(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def get_default_filetype(self):
return rcParams['cairo.format']
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not cairo.HAS_PS_SURFACE:
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not cairo.HAS_PDF_SURFACE:
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not cairo.HAS_SVG_SURFACE:
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
filename = fo
if is_string_like(fo):
fo = open(fo, 'wb')
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.ctx
if orientation == 'landscape':
ctx.rotate (npy.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
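# Illustrative sketch (not part of the backend): how user code would select
# this backend, for matplotlib versions that ship it. The backend must be
# chosen before pyplot is imported.
#
#     import matplotlib
#     matplotlib.use('Cairo')
#     import matplotlib.pyplot as plt
#     plt.plot([1, 2, 3])
#     plt.savefig('figure.png')   # rendered through RendererCairo above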
| agpl-3.0 |
blue-yonder/azure-cost-mon | azure_costs_exporter/allocated_vm_collector.py | 1 | 3570 | from prometheus_client.core import GaugeMetricFamily
from pandas import DataFrame
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient
_BASE_COLUMNS = ['subscription', 'location', 'resource_group', 'vm_size']
_COUNT_COLUMN = ['total']
_ALL_COLUMNS = _BASE_COLUMNS + _COUNT_COLUMN
def _extract_resource_group(vm_id):
return vm_id.split('/')[4]
class AzureAllocatedVMCollector(object):
"""
Class to export information about allocated Azure virtual machines
in Prometheus compatible format.
"""
def __init__(self, application_id, application_secret, tenant_id, subscription_ids, metric_name):
"""
Constructor. Access is granted to what Microsoft calls a service principal / Azure Active Directory
Application / app registration. Read more about this topic at
https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal.
This page will guide you how to obtain an application_id, and application_secret, and the tenant_id of
your Azure Active Directory. Please do not forget to grant "Reader" permissions to the app for all
subscriptions that you want to monitor.
:param application_id: The application ID that is created during the Azure app registration.
:param application_secret: The application secret that is created during the Azure app registration.
:param tenant_id: The ID of your Azure Active Directory instance
:param subscription_ids: A _sequence_ of subscription IDs that shall be monitored. The application_id
required "Reader" permissions on each subscription.
:param metric_name: Name of the timeseries
"""
self._metric_name = metric_name
self._subscription_ids = subscription_ids
self._credentials = ServicePrincipalCredentials(
client_id=application_id,
secret=application_secret,
tenant=tenant_id)
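# Illustrative registration sketch (an assumption, not part of the original
# module); the IDs below are placeholders:
#
#     from prometheus_client import REGISTRY
#     collector = AzureAllocatedVMCollector(
#         application_id='<app-id>',
#         application_secret='<app-secret>',
#         tenant_id='<tenant-id>',
#         subscription_ids=['<subscription-id>'],
#         metric_name='azure_allocated_vms')
#     REGISTRY.register(collector)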
def _create_gauge(self):
description = "Number of virtual machines per Azure subscription, location, resource group, and vm size"
allocated_vms = GaugeMetricFamily(self._metric_name, description, labels=_BASE_COLUMNS)
return allocated_vms
def _collect_allocated_vms(self, allocated_vms):
rows = []
for subscription_id in self._subscription_ids:
compute_client = ComputeManagementClient(self._credentials, str(subscription_id))
for vm in compute_client.virtual_machines.list_all():
rows.append([
subscription_id,
vm.location,
_extract_resource_group(vm.id),
vm.hardware_profile.vm_size,
1])
df = DataFrame(data=rows, columns=_ALL_COLUMNS)
groups = df.groupby(_BASE_COLUMNS).sum()
for labels, value in groups.iterrows():
allocated_vms.add_metric(labels, int(value.total))
def describe(self):
"""
Default registry calls "collect" if "describe" is not existent to determine timeseries names.
We do not want that to avoid pointless Azure API calls.
:return: The metrics we are collecting.
"""
return [self._create_gauge()]
def collect(self):
"""
Yield the metrics.
"""
allocated_vms = self._create_gauge()
self._collect_allocated_vms(allocated_vms)
yield allocated_vms
| mit |
linjinjin123/Digital-Image-Processing-Homework | hw3/src/UI.py | 1 | 25466 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# python verson: 2.7
# @Author: JinJin Lin
# @Email: jinjin.lin@outlook.com
# @License: MIT
# @Date: 2015-10-21 18:38:17
# @Last Modified time: 2015-12-04 20:19:24
# All copyright reserved
#
import Image
import wx
import os
import numpy as np
import matplotlib.pyplot as plt
class MainWindow(wx.Frame):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.RESIZE_BASE_ID = 0
self.QUANTIZE_BASE_ID = 20
self.HISTOGRAM_BASE_ID = 30
self.FILTER_BASE_ID = 40
self.FILTER_HIGH_BOOST = 9998
self.DFT_BASE_ID = 50
self.IDFT_BASE_ID = 51
self.FFT_BASE_ID = 52
self.IFFT_BASE_ID = 53
self.FILTER_FREQ_AVER_7X7_ID = 60
self.FILTER_FREQ_LAPLA_ID = 61
self.is_FT = False  # whether the current image is a Fourier-transform result
self.HELP_BASE_ID = 9999
self.currentImage = None
self.filter3x3 = [[1] * 3] * 3
self.filter7x7 = [[1] * 7] * 7
self.filter7x7_49 = np.array([float(1) / 49] * 49).reshape(7, 7)
self.filter11x11 = [[1] * 11] * 11
self.laplacian = [[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]
self.laplacian_freq = np.array([-1, -1, -1, -1, 8, -1, -1, -1, -1]).reshape(3, 3)
self.InitUI()
def InitUI(self):
menubar = wx.MenuBar()
fileMenu = self.createFileMenu()
resizeMenu = self.createResizeMenu()
quantizeMenu = self.createQuantizeMenu()
histogramMenu = self.createHistogramMenu()
filterMenu = self.createFilterMenu()
DFTMenu = self.createDFTMenu()
filterInFreqMenu = self.createFilterInFreqMenu()
helpMenu = self.createHelpMenu()
menubar.Append(fileMenu, '&File')
menubar.Append(resizeMenu, '&Resize')
menubar.Append(quantizeMenu, '&Quantize')
menubar.Append(histogramMenu, '&Histogram')
menubar.Append(filterMenu, '&Filter')
menubar.Append(DFTMenu, '&DFT')
menubar.Append(filterInFreqMenu, '&Filter in freq')
menubar.Append(helpMenu, '&Help')
self.SetMenuBar(menubar)
self.SetSize((600, 600))
self.SetTitle('13331149_linjinjin')
self.Centre()
self.Show(True)
def createFileMenu(self):
#file menu
fileMenu = wx.Menu()
oitem = fileMenu.Append(wx.ID_OPEN, '&Open')
sitem = fileMenu.Append(wx.ID_SAVE, '&Save')
fileMenu.AppendSeparator()
qitem = fileMenu.Append(wx.ID_EXIT, '&Quit', 'Quit application')
#bind file menu event
self.Bind(wx.EVT_MENU, self.OpenImg, oitem)
self.Bind(wx.EVT_MENU, self.SaveImg, sitem)
self.Bind(wx.EVT_MENU, self.OnQuit, qitem)
return fileMenu
def createResizeMenu(self):
#resize menu
resizeMenu = wx.Menu()
self.size = [[192, 128], [96, 64], [48, 32], [24, 16], [12, 8],
[300, 200], [450, 300], [500, 200]]
length = len(self.size)
for i in range(length):
theSize = repr(self.size[i][0]) + 'x' + repr(self.size[i][1])
resizeMenu.Append(self.RESIZE_BASE_ID+i, 'Resize the image as ' + theSize)
self.Bind(wx.EVT_MENU, self.resize, id=self.RESIZE_BASE_ID+i)
return resizeMenu
def createQuantizeMenu(self):
#quantize menu
quantizeMenu = wx.Menu()
self.level = [128, 32, 8, 4, 2]
length = len(self.level)
for i in range(length):
quantizeMenu.Append(self.QUANTIZE_BASE_ID+i, repr(self.level[i])+' level', 'set grey level')
self.Bind(wx.EVT_MENU, self.setLevel, id=self.QUANTIZE_BASE_ID+i)
return quantizeMenu
def createHistogramMenu(self):
histogramMenu = wx.Menu()
histogramMenu.Append(self.HISTOGRAM_BASE_ID+0, 'Show histogram')
histogramMenu.Append(self.HISTOGRAM_BASE_ID+1, 'Equalize histogram')
self.Bind(wx.EVT_MENU, self.showHistogram, id=self.HISTOGRAM_BASE_ID+0)
self.Bind(wx.EVT_MENU, self.equalizeHistogram, id=self.HISTOGRAM_BASE_ID+1)
return histogramMenu
def createFilterMenu(self):
filterMenu = wx.Menu()
self.FILTER_TYPE = [self.filter3x3, self.filter7x7, self.filter11x11, self.laplacian]
filterName = ['3x3 averaging', '7x7 averaging', '11x11 averaging',
'Laplacian filter']
length = len(self.FILTER_TYPE)
for i in range(length):
filterMenu.Append(self.FILTER_BASE_ID+i, filterName[i])
self.Bind(wx.EVT_MENU, self.filter, id=self.FILTER_BASE_ID+i)
#Add high boost funtion
filterMenu.Append(self.FILTER_HIGH_BOOST, "High boost")
self.Bind(wx.EVT_MENU, self.filterWithHighBoost, id=self.FILTER_HIGH_BOOST)
return filterMenu
def createDFTMenu(self):
DFTMenu = wx.Menu()
DFTMenu.Append(self.DFT_BASE_ID, 'DFT')
self.Bind(wx.EVT_MENU, self.DFT, id=self.DFT_BASE_ID)
DFTMenu.Append(self.IDFT_BASE_ID, 'DFT then IDFT')
self.Bind(wx.EVT_MENU, self.DFT, id=self.IDFT_BASE_ID)
DFTMenu.Append(self.FFT_BASE_ID, 'FFT')
self.Bind(wx.EVT_MENU, self.DFT, id=self.FFT_BASE_ID)
DFTMenu.Append(self.IFFT_BASE_ID, 'FFT then IFFT')
self.Bind(wx.EVT_MENU, self.DFT, id=self.IFFT_BASE_ID)
return DFTMenu
def createFilterInFreqMenu(self):
filterInFreqMenu = wx.Menu()
filterInFreqMenu.Append(self.FILTER_FREQ_AVER_7X7_ID, '7x7 averaging filter')
self.Bind(wx.EVT_MENU, self.filterInFreq, id=self.FILTER_FREQ_AVER_7X7_ID)
filterInFreqMenu.Append(self.FILTER_FREQ_LAPLA_ID, '3X3 Laplacian filter ')
self.Bind(wx.EVT_MENU, self.filterInFreq, id=self.FILTER_FREQ_LAPLA_ID)
return filterInFreqMenu
def createHelpMenu(self):
helpMenu = wx.Menu()
helpMenu.Append(self.HELP_BASE_ID+0, "About")
self.Bind(wx.EVT_MENU, self.about, id=self.HELP_BASE_ID+0)
return helpMenu
def OnQuit(self, e):
self.Close()
def OpenImg(self, e):
dirname = ''
# filedialog for user to choose image
dlg = wx.FileDialog(self, "Choose a file", dirname, "", "*.png", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
path = os.path.join(dirname, filename)
self.pilImage = Image.open(path)
self.modifiedPIL = self.pilImage
self.setImageWithPIL(self.pilImage)
dlg.Destroy()
def SaveImg(self, e):
if not self.isOpenFile(): return
dirname = ''
# filedialog for user to save the image
dlg = wx.FileDialog(self, "Save this file as", dirname, "", "*.png", wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetFilename()
dirname = dlg.GetDirectory()
path = os.path.join(dirname, filename)
self.modifiedPIL.save(path)
dlg.Destroy()
def resize(self, e):
if not self.isOpenFile(): return
eventId = e.GetId()
targetWidth = self.size[eventId-self.RESIZE_BASE_ID][0]
targetHeight = self.size[eventId-self.RESIZE_BASE_ID][1]
self.modifiedPIL = self.scale(self.pilImage, (targetWidth, targetHeight))
self.setImageWithPIL(self.modifiedPIL)
def showHistogram(self, e):
if not self.isOpenFile(): return
self.plot_hist(self.pilImage)
def equalizeHistogram(self, e):
if not self.isOpenFile(): return
self.modifiedPIL = self.equalize_hist(self.pilImage)
self.setImageWithPIL(self.modifiedPIL)
self.plot_hist(self.modifiedPIL)
def filter(self, e):
if not self.isOpenFile(): return
eventId = e.GetId()
filterMatrix = self.FILTER_TYPE[eventId-self.FILTER_BASE_ID]
self.modifiedPIL = self.filter2d(self.pilImage, filterMatrix)
self.setImageWithPIL(self.modifiedPIL)
def filterWithHighBoost(self, e):
if not self.isOpenFile(): return
eventId = e.GetId()
filterMatrix = [[1] * 3] * 3
self.modifiedPIL = self.filter2dWithHighBoost(self.pilImage, filterMatrix, weight_k=1.5)
self.setImageWithPIL(self.modifiedPIL)
def about(self, e):
pass
def setLevel(self, e):
if not self.isOpenFile(): return
eventId = e.GetId()
level = self.level[eventId-self.QUANTIZE_BASE_ID]
self.modifiedPIL = self.quantize(self.pilImage, level)
self.setImageWithPIL(self.modifiedPIL)
def setImageWithPIL(self, myPilImage):
self.myWxImage = self.PilImageToWxImage(myPilImage)
if self.isOpenFile(): self.currentImage.Destroy()
self.currentImage = wx.StaticBitmap(parent = self, bitmap = self.myWxImage.ConvertToBitmap())
def isOpenFile(self):
return self.currentImage is not None
def PilImageToWxImage(self, myPilImage):
#reference from http://wiki.wxpython.org/WorkingWithImages
myWxImage = wx.EmptyImage(myPilImage.size[0], myPilImage.size[1])
myWxImage.SetData( myPilImage.convert('RGB').tostring() )
return myWxImage
def quantize(self, inputImg, level):
"""
A function that takes a gray image and a target number of gray levels as input,
and generates the quantized image as output.
Args:
inputImg: A PIL image, mode 'L' (8-bit pixels, black and white)
level: an integer defining the number of gray levels of the output.
Returns:
The quantized image
"""
outputImg = Image.new("L", inputImg.size)
width, height = inputImg.size
for i in range(width):
for j in range(height):
newValue = int((level * inputImg.getpixel((i, j))/256) * (float(255)/(level-1)))
outputImg.putpixel((i, j), newValue)
return outputImg
def scale(self, inputImg, size):
"""
A function that takes a gray image and a target size as input,
and generates the scaled image as output.
Args:
inputImg: A PIL image, mode 'L' (8-bit pixels, black and white)
size: a tuple of (width, height) defining the spatial resolution of output.
Returns:
A scaled image
"""
originWidth, originHeight = inputImg.size
targetWidth = size[0]
targetHeight = size[1]
outputImg = Image.new("L", size)
for i in range(targetWidth):
for j in range(targetHeight):
x = i * (float(originWidth)/targetWidth)
y = j * (float(originHeight)/targetHeight)
srcx = int(x) #origin matrix index x
srcy = int(y) #origin matrix index y
u = x - srcx
v = y - srcy
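# Bilinear interpolation: (srcx, srcy) is the top-left neighbor of the
# back-projected point (x, y); u and v are the fractional offsets, and the
# four neighboring pixels are blended with weights (1-u)(1-v), (1-u)v,
# u(1-v) and u*v.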
if srcx == originWidth-1 or srcy == originHeight-1:
newValue = inputImg.getpixel((srcx, srcy))
else:
newValue = (1-u)*(1-v)*inputImg.getpixel((srcx, srcy)) + (1-u)*v*inputImg.getpixel((srcx, srcy+1))\
+ u*(1-v)*inputImg.getpixel((srcx+1, srcy)) + u*v*inputImg.getpixel((srcx+1, srcy+1))
outputImg.putpixel((i, j), newValue)
return outputImg
def plot_hist(self, inputImg):
pixelGrayProbability = self.calPixelGrayProbaility(inputImg)
for i in range(256):
plt.plot([i, i], [0, pixelGrayProbability[i]], linewidth=3, color='k')
plt.axis([0,255, 0,0.1])
plt.show()
def calPixelGrayProbaility(self, inputImg):
pixelGrayCount = [0] * 256
pixelGrayProbability = [0] * 256
width, height = inputImg.size
for i in range(width):
for j in range(height):
pixelGrayCount[(int)(inputImg.getpixel((i, j)))] += 1
pixelNum = width * height
for i in range(256):
pixelGrayProbability[i] = (float)(pixelGrayCount[i])/pixelNum
return pixelGrayProbability
def equalize_hist(self, inputImg):
"""
A function that applies histogram equalization on a gray scale image
Args:
inputImg: A PIL image, mode 'L' (8-bit pixels, black and white)
Returns:
A PIL image: a gray-scale image whose histogram is approximately flat.
"""
pixelGrayProbability = self.calPixelGrayProbaility(inputImg)
transform = [0] * 256
transform[0] = pixelGrayProbability[0]
for i in range(1, 256):
transform[i] = transform[i-1] + pixelGrayProbability[i]
outputImg = Image.new("L", inputImg.size)
width, height = inputImg.size
for i in range(width):
for j in range(height):
# The new gray value is 255 times the cumulative probability (CDF value) of the original pixel gray level
newValue = (int)(255 * transform[inputImg.getpixel((i, j))])
outputImg.putpixel((i, j), newValue)
return outputImg
def filter2d(self, inputImg, filterMatrix):
filterSize = len(filterMatrix)
fliterValueSum = self.calFilterValueSum(filterMatrix)
# filterType : 0 when it's an averaging filter
# 1 when it's a Laplacian filter
filterType = (0 if fliterValueSum != 0 else 1)
padSize = (filterSize-1)/2
extendImg = self.addPadToImage(inputImg, padSize)
outputImg = Image.new("L", inputImg.size)
originWidth, originHeigh = inputImg.size
width, height = outputImg.size
for i in xrange(width):
for j in xrange(height):
patchMatrix = self.getPatchMatrix(i+padSize, j+padSize, extendImg, filterSize)
innerProduct = self.calInnerProduct(patchMatrix, filterMatrix)
if filterType == 0:
newValue = innerProduct/fliterValueSum
elif filterType == 1:
newValue = inputImg.getpixel((i, j)) + innerProduct
if newValue > 255:
newValue = 255
elif newValue < 0:
newValue = 0
outputImg.putpixel((i, j), newValue)
return outputImg
def filter2dWithHighBoost(self, inputImg, filterMatrix, weight_k):
"""
When weight_k = 1, we have unsharp masking
When weight_k > 1, the process is referred to as highboost filtering
When weight_k < 1 de-emphasizes the contribution of the unsharp mask
"""
blurImg = self.filter2d(inputImg, filterMatrix)
maskImg = self.subtractImg(inputImg, blurImg)
highBoostImg = Image.new("L", inputImg.size)
width, height = highBoostImg.size
for i in range(width):
for j in range(height):
value = inputImg.getpixel((i, j)) + weight_k * maskImg.getpixel((i, j))
highBoostImg.putpixel((i, j), value)
return highBoostImg
def subtractImg(self, minuendImg, subtrahendImg):
width, height = minuendImg.size
maskImg = Image.new("L", (width, height))
for i in range(width):
for j in range(height):
value = minuendImg.getpixel((i, j)) - subtrahendImg.getpixel((i, j))
maskImg.putpixel((i, j), value)
return maskImg
def calInnerProduct(self, matrix_A, matrix_B):
if (len(matrix_A) != len(matrix_B)):
print "The size of matrix is not equal !"
return
size = len(matrix_A)
innerProduct = 0
for i in range(size):
for j in range(size):
innerProduct += matrix_A[i][j] * matrix_B[i][j]
return innerProduct
def calFilterValueSum(self, filterMatrix):
fliterValueSum = 0
filterSize = len(filterMatrix)
for i in range(filterSize):
for j in range(filterSize):
fliterValueSum += filterMatrix[i][j]
return fliterValueSum
def getPatchMatrix(self, patchPos_x, patchPos_y, extendImg, filterSize):
padSize = (filterSize-1)/2
patchMatrix = []
patchLeftTop_x = patchPos_x - padSize
patchLeftTop_y = patchPos_y - padSize
for i in range(filterSize):
column = []
for j in range(filterSize):
column.append(extendImg.getpixel((patchLeftTop_x+i, patchLeftTop_y+j)))
patchMatrix.append(column)
return patchMatrix
def addPadToImage(self, inputImg, padSize):
"""
A function to add padding to the image
Args:
inputImg: A PIL image, mode 'L' (8-bit pixels, black and white)
padSize: the size of the padding on each side
Returns:
An extended (padded) PIL image
"""
inputImgWidth, inputImgHeight = inputImg.size
extendImgWidth = inputImgWidth + padSize*2
extendImgHeight = inputImgHeight + padSize*2
extendImg = Image.new("L", (extendImgWidth, extendImgHeight))
extendImg.paste(inputImg, (padSize, padSize))
return extendImg
"""
HW3: Filtering in the Frequency Domain
"""
def DFT(self, e):
if not self.isOpenFile(): return
eventId = e.GetId()
imgArr = np.array(self.pilImage)
if eventId == self.DFT_BASE_ID:
centralImgArr = self.centralization(imgArr)
dftArr = self.dft2d(centralImgArr, 1)
resultArr = self.convertArrValueTo0_255(np.log(abs(dftArr) + 1))
elif eventId == self.IDFT_BASE_ID:
dftArr = self.dft2d(imgArr, 1)
resultArr = self.dft2d(dftArr, -1).real
elif eventId == self.FFT_BASE_ID:
centralImgArr = self.centralization(imgArr)
fftArr = self.fft2d(centralImgArr, 1)
resultArr = self.convertArrValueTo0_255(np.log(abs(fftArr) + 1))
elif eventId == self.IFFT_BASE_ID:
fftArr = self.fft2d(imgArr, 1)
resultArr = self.fft2d(fftArr, -1).real
resultArr = resultArr[:imgArr.shape[0], :imgArr.shape[1]]
# The line below raises a strange error..
# self.modifiedPIL = Image.fromarray(np.trunc(resultArr), mode='L')
self.modifiedPIL = self.getPILImgFromArray(resultArr)
self.setImageWithPIL(self.modifiedPIL)
def dft2d(self, imgArr, flags):
"""
A function to perform 2-D Discrete Fourier Transform (DFT)
or Inverse Discrete Fourier Transform (IDFT)
Args:
imgArr: A 2-D numpy array holding the gray-scale image
flags: Integer, 1 for DFT, -1 for IDFT
Returns:
the DFT / IDFT result of the given input, a 2-D complex numpy array
"""
row, col = imgArr.shape
maxSize = max(col, row)
xColVec = np.array(range(maxSize)).reshape(maxSize, 1)
yRowVec = np.array(range(maxSize)).reshape(1, maxSize)
uxvyArr = np.dot(xColVec, yRowVec) * 2 * np.pi
e_vy = (uxvyArr[:col, :col].copy())/col
e_vy = np.cos(e_vy) - flags * np.sin(e_vy) * 1j
e_ux = (uxvyArr[:row, :row].copy())/row
e_ux = np.cos(e_ux) - flags * np.sin(e_ux) * 1j
F_xv = np.dot(imgArr, e_vy)
F_uv = np.dot(e_ux, F_xv)
if flags == -1:
F_uv *= 1.0/(row * col)
return F_uv
def fft2d(self, imgArr, flags):
"""
A function to perform 2-D Fast Fourier Transform (FFT)
or Inverse Fast Fourier Transform (IFFT)
Args:
imgArr: A 2-D numpy array holding the gray-scale image
flags: Integer, 1 for FFT, -1 for IFFT
Returns:
the FFT / IFFT result of the given input, a 2-D complex numpy array
"""
padImgArr = self.getPadImgArr(imgArr)
row, col = padImgArr.shape
resultArr = np.array([0j] * row * col).reshape(row, col)
for i in range(row):
resultArr[i] = self.fft1d(padImgArr[i], flags)
for j in range(col):
resultArr[0:row, j] = self.fft1d(resultArr[0:row, j], flags)
if flags == -1:
resultArr /= (row * col)
return resultArr
def fft1d(self, oneDimArr, flags):
# Non-recursive version (did not work)
# N = oneDimArr.shape[0]
# n = np.arange(N)
# k = n[:, None]
# M = np.exp(flags * 2j * np.pi * n * k / N)
# X = np.dot(M, oneDimArr.reshape((N, -1)))
# while X.shape[0] < N:
# X_even = X[:, :X.shape[1]/2]
# X_odd = X[:, X.shape[1]/2:]
# factor = np.exp(flags * 1j * np.pi * np.arange(X.shape[0])
# / X.shape[0])[:, None]
# X = np.vstack([X_even] + factor * X_odd,
# [X_even] - factor * X_odd)
# return X
# Recursive version
N = oneDimArr.shape[0]
if N == 1: return oneDimArr
X_even = self.fft1d(oneDimArr[::2], flags)
X_odd = self.fft1d(oneDimArr[1::2], flags)
factor = np.exp(flags* 2j * np.pi * np.arange(N) / N)
return np.concatenate([X_even + factor[:N / 2] * X_odd,
X_even + factor[N / 2:] * X_odd])
def getPadImgArr(self, imgArr):
row, col = imgArr.shape
temp = imgArr[:row, :col].copy()
M = 0
N = 0
while 2**M < row:
M += 1
while 2**N < col:
N += 1
padDownLen = 2**M - row
padRightLen = 2**N - col
return np.lib.pad(temp, ((0, padDownLen), (0, padRightLen)), 'constant', constant_values=0)
def centralization(self, imgArr):
# Every pixel value in imgArr is in 0~255; values outside that range would overflow
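# Multiplying by (-1)**(x + y) shifts the zero-frequency component of the
# subsequent DFT to the center of the spectrum (the standard centering trick).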
row, col = imgArr.shape
newArr = imgArr.copy()
for x in range(row):
for y in range(col):
newArr[x][y] = imgArr[x][y] * (-1) ** (x + y)
return newArr
def convertArrValueTo0_255(self, imgArr):
maxNum = imgArr.max()
minNum = imgArr.min()
delta = maxNum - minNum
return ((imgArr - minNum) * 255 / delta).astype(int)
def filterInFreq(self, e):
if not self.isOpenFile(): return
eventId = e.GetId()
imgArr = np.array(self.pilImage)
if eventId == self.FILTER_FREQ_AVER_7X7_ID:
filterMatrix = self.filter7x7_49
resultArr = self.filter2d_in_freq(imgArr, filterMatrix, 1)
elif eventId == self.FILTER_FREQ_LAPLA_ID:
filterMatrix = self.laplacian_freq
resultArr = self.filter2d_in_freq(imgArr, filterMatrix, -1)
self.modifiedPIL = self.getPILImgFromArray(resultArr)
self.setImageWithPIL(self.modifiedPIL)
def filter2d_in_freq(self, imgArr, filterMatrix, flag):
"""
A function that performs filtering in the frequency domain
Args:
imgArr: A 2-D numpy array holding the gray-scale image
filterMatrix: the filter kernel, a 2-D numpy array
flag: Integer, 1 for an averaging filter, -1 for the Laplacian filter
Returns:
the filtered result of the given input, a 2-D numpy array
"""
imgRows, imgCols = imgArr.shape
filterRows, filterCols = filterMatrix.shape
# padImg = imgArr
padImg = self.padImgWithZero(imgArr, imgRows+filterRows-1, imgCols+filterCols-1)
# padImg = self.padFilterWithZero(imgArr,
# imgRows+filterRows-1, imgCols+filterCols-1)
padFilter = self.padFilterWithZero(filterMatrix,
imgRows+filterRows-1, imgCols+filterCols-1)
# padFilter = self.padImgWithZero(filterMatrix,
# imgRows, imgCols)
dftImgArr = self.dft2d(padImg, 1)
dftFilterArr = self.dft2d(padFilter, 1)
G = dftFilterArr * dftImgArr
G = self.centralization(G)
resultArr = (self.dft2d(G, -1).real)[:imgRows, :imgCols]
if (flag == -1):
resultArr += imgArr
return resultArr
def padImgWithZero(self, imgArr, targetRowsLen, targetColsLen):
row, col = imgArr.shape
padDownLen = targetRowsLen - row
padRightLen = targetColsLen - col
return np.lib.pad(imgArr, ((0, padDownLen), (0, padRightLen)),
'constant', constant_values=0)
def padFilterWithZero(self, filterArr, targetRowsLen, targetColsLen):
row, col = filterArr.shape
padUpLen = (targetRowsLen - row +1)/2
padDownLen = (targetRowsLen - row)/2
padLeftLen = (targetColsLen - col + 1)/2
padRightLen = (targetColsLen - col)/2
return np.lib.pad(filterArr, ((padUpLen, padDownLen), (padLeftLen, padRightLen)),
'constant', constant_values=0)
def getPILImgFromArray(self, imgArr):
height, width = imgArr.shape
newImg = Image.new('L', (width, height))
for i in range(width):
for j in range(height):
newImg.putpixel((i,j), imgArr[j][i])
return newImg | mit |
tmills/neural-assertion | scripts/keras/singletask/assertion_split_cnn_train.py | 1 | 7771 | #!/usr/bin/env python
from keras.callbacks import EarlyStopping
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, Activation, Embedding, Merge, Convolution1D, Lambda
from keras.optimizers import SGD
from keras.utils import np_utils
#from sklearn.datasets import load_svmlight_file
import sklearn as sk
import sklearn.cross_validation
import numpy as np
import ctakesneural.io.cleartk_io as ctk_io
import sys
import os.path
import pickle
from zipfile import ZipFile
from ctakesneural.models.nn_models import max_1d, get_mlp_optimizer
## From no early stoping:
## Best config: {'layers': (2048,), 'embed_dim': 50, 'filters': (2, 3), 'batch_size': 64, 'num_filters': (256, 64, 32)}
#nb_epoch = 80
#batch_size=64
#num_filters=(256,64,32)
#layers=(2048,)
#embed_dim=50
#width=(2,3)
## These values from the results with early stopping enabled:
nb_epoch = 80
batch_size = 32
num_filters = (32,32,64)
layers = (512,)
embed_dim = 50
width = (3,)
def main(args):
if len(args) < 1:
sys.stderr.write("Error - one required argument: <data directory>\n")
sys.exit(-1)
working_dir = args[0]
print("Reading data...")
## Read data such that each instance is a list of three sections: entity terms, left context, right context
Y, label_alphabet, X_array, feature_alphabet = ctk_io.read_token_sequence_data(working_dir)
X_segments, dimensions = split_entity_data(X_array, feature_alphabet)
Y_array = np.array(Y)
Y_adj, indices = ctk_io.flatten_outputs(Y_array)
model = get_split_cnn(dimensions, len(feature_alphabet), embed_dim, num_filters, layers, num_outputs = 1 if len(label_alphabet) <=2 else len(label_alphabet) )
model.fit(X_segments, Y_adj,
nb_epoch=nb_epoch,
batch_size=batch_size,
verbose=1,
validation_split=0.2,
callbacks=[get_early_stopper()])
model.summary()
model.save(os.path.join(working_dir, 'model.h5'), overwrite=True)
fn = open(os.path.join(working_dir, 'alphabets.pkl'), 'w')
pickle.dump( (feature_alphabet, label_alphabet), fn)
fn.close()
with ZipFile(os.path.join(working_dir, 'script.model'), 'w') as myzip:
myzip.write(os.path.join(working_dir, 'model.h5'), 'model.h5')
myzip.write(os.path.join(working_dir, 'alphabets.pkl'), 'alphabets.pkl')
def get_early_stopper():
return EarlyStopping(monitor='val_loss', patience=2, verbose=0, mode='auto')
def get_split_cnn(dimensions, vocab_size, embed_dim, conv_layers, fc_layers, num_outputs=1, filter_widths=(2,3,4)):
## Build model:
input_0 = Input(shape=(dimensions[0][1],), dtype='int32', name='Entity_Input')
input_1 = Input(shape=(dimensions[1][1],), dtype='int32', name='Left context input')
input_2 = Input(shape=(dimensions[2][1],), dtype='int32', name='Right context input')
embed = Embedding(input_dim=vocab_size, output_dim=embed_dim) #, input_length=dimensions[0][1])
## First input:
x0 = embed(input_0)
convs = []
for width in filter_widths:
conv = Convolution1D(conv_layers[0], width, activation='relu', init='uniform')(x0)
pooled = Lambda(max_1d, output_shape=(conv_layers[0],))(conv)
convs.append(pooled)
if len(convs) > 1:
x0 = Merge(mode='concat') (convs)
else:
x0 = convs[0]
## Second input:
x1 = embed(input_1)
convs = []
for width in filter_widths:
conv = Convolution1D(conv_layers[1], width, activation='relu', init='uniform')(x1)
pooled = Lambda(max_1d, output_shape=(conv_layers[1],))(conv)
convs.append(pooled)
if len(convs) > 1:
x1 = Merge(mode='concat') (convs)
else:
x1 = convs[0]
## Third input:
x2 = embed(input_2)
convs = []
for width in filter_widths:
conv = Convolution1D(conv_layers[2], width, activation='relu', init='uniform')(x2)
pooled = Lambda(max_1d, output_shape=(conv_layers[2],))(conv)
convs.append(pooled)
if len(convs) > 1:
x2 = Merge(mode='concat') (convs)
else:
x2 = convs[0]
## Merge three streams after convolution layers:
x = Merge(mode='concat')([x0, x1, x2])
## Fully connected layer after convolutions:
for num_nodes in fc_layers:
x = Dense(num_nodes, init='uniform')(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
## Output layer
out_name = "Output"
if num_outputs == 1:
output = Dense(1, init='uniform', activation='sigmoid', name=out_name)(x)
loss = 'binary_crossentropy'
else:
output = Dense(num_outputs, init='uniform', activation='softmax', name=out_name)(x)
loss='categorical_crossentropy'
sgd = get_mlp_optimizer()
model = Model(input=[input_0, input_1, input_2], output=output)
model.compile(optimizer = sgd,
loss = loss)
return model
def split_entity_data(input_array, alphabet, input_shape=None):
''' Takes input in the form BEFORE_CONTEXT_0 ... BEFORE_CONTEXT_N-1 <e> entity_term_0 ... entity_term_M-1 </e> AFTER_CONTEXT_0 ... AFTER_CONTEXT_Q-1
represented as a list of lists of int-mapped tokens, and turns it into three matrices of int-mapped tokens, first entity tokens, then left contexts,
then right contexts.
X0, X1, X2 = split_entity_data(input_array, feature_alphabet)
this function needs the feature_alphabet so it can look up the special tokens <e> and </e> that delimit the entity
'''
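# For illustration (token ids are arbitrary; <e> and </e> stand for the
# alphabet ids of the entity delimiters): an instance mapped as
# [5, 9, <e>, 17, 17, </e>, 3, 0, 0] is split into entity [17, 17],
# left context [5, 9] and right context [3] (trailing zero padding dropped),
# returned in that order as X0, X1, X2.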
num_insts, num_toks = input_array.shape
dimensions = []
X0 = []
X1 = []
X2 = []
for inst_ind in range(num_insts):
left_context = []
entity = []
right_context = []
tok_ind = 0
while input_array[inst_ind][tok_ind] != alphabet["<e>"]:
left_context.append(input_array[inst_ind][tok_ind])
tok_ind += 1
tok_ind += 1 ### Skip past <e>
while input_array[inst_ind][tok_ind] != alphabet["</e>"]:
entity.append(input_array[inst_ind][tok_ind])
tok_ind += 1
tok_ind += 1 ### Skip past </e>
while tok_ind < num_toks:
### Special case for right-context, because input is padded to the right
## with zeros, we can delete those. Right context should always have a
## constant number of features.
if input_array[inst_ind][tok_ind] == 0:
break
right_context.append(input_array[inst_ind][tok_ind])
tok_ind += 1
X0.append(entity)
X1.append(left_context)
X2.append(right_context)
if input_shape is not None:
## add pseudo instance with padding along dimensions
entity_shape, left_shape, right_shape = input_shape
X0.append( [0 for x in range(entity_shape[1])])
X1.append( [0 for x in range(left_shape[1])])
X2.append( [0 for x in range(right_shape[1])])
## Only need to pad X0 for this application -- maybe generalize in case of other use cases.
ctk_io.pad_instances(X0)
ctk_io.pad_instances(X2)
dimensions.append( (num_insts, max([len(x) for x in X0])))
dimensions.append( (num_insts, max([len(x) for x in X1])))
dimensions.append( (num_insts, max([len(x) for x in X2])))
if input_shape is not None:
## after padding delete pseudo instance
X0 = [X0[0]]
X1 = [X1[0]]
X2 = [X2[0]]
return [np.array(X0), np.array(X1), np.array(X2)], dimensions
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
rexshihaoren/scikit-learn | sklearn/tests/test_random_projection.py | 142 | 14033 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries with
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrices should produce a transformation matrix
# with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of the Gaussian random matrix
# Check that the random matrix follows the proper distribution.
# Let's say that each element a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follows the proper distribution.
# Let's say that each element a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
calatre/epidemics_network | plt/SIR 1 plot_maxs.py | 1 | 1093 | # 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 16/5/2017
# Plotting Multiple Simulations of a SIR Epidemic Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
#Choosing the values for c and r to study
cvalues = [0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]#
rvalues = [0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1]#
maxs = pd.read_csv('infection maxima.csv', index_col = 0)
x = maxs.columns
y = maxs.index
X,Y = np.meshgrid(x,y)
Z = maxs
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z)
#fig = plt.figure()
#ax = fig.gca(projection='3d')
#surf = ax.plot_surface(X, Y, maxs)
#surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
#ax.set_zlim(-1.01, 1.01)
#ax.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#fig.colorbar(surf, shrink=0.5, aspect=5)
#plt.title('Original Code')
plt.show()
print(maxs)
| apache-2.0 |
jplusplus/statscraper | tests/scrapertests/test_smhi_scraper.py | 1 | 3428 | # encoding: utf-8
from unittest import TestCase
import pandas as pd
from statscraper.scrapers.SMHIScraper import SMHI, Collection, API, SMHIDataset, Station
class TestSMHI(TestCase):
def setUp(self):
"""Setting up scraper."""
def test_fetch_api(self):
scraper = SMHI()
apis = scraper.items
self.assertTrue(len(apis) > 0)
api = apis[0]
self.assertTrue(api, API)
self.assertFalse(api.label is None)
self.assertFalse(api.url is None)
self.assertTrue(isinstance(api.json, dict))
def test_fetch_dataset(self):
u"""Moving to an “API”."""
scraper = SMHI()
api = scraper.get("Meteorological Observations")
for dataset in api:
self.assertFalse(dataset.label is None)
self.assertFalse(dataset.url is None)
# Make sure its a dataset
self.assertTrue(isinstance(dataset, SMHIDataset))
# Get dimensions
self.assertGreater(len(dataset.dimensions), 0)
def test_fetch_allowed_values(self):
scraper = SMHI()
api = scraper.get("Meteorological Observations")
dataset = api.items[0]
stations = dataset.dimensions["station"].allowed_values
active_stations = [x for x in dataset.dimensions["station"].active_stations()]
self.assertTrue(len(stations)>0)
self.assertTrue(len(active_stations)>0)
station = dataset.dimensions["station"].allowed_values.get_by_label(u"Växjö A")
self.assertTrue(isinstance(station, Station))
self.assertFalse(station.label is None)
periods = dataset.dimensions["period"].allowed_values
self.assertEqual(len(periods), 4)
def test_query(self):
scraper = SMHI()
api = scraper.get("Meteorological Observations")
dataset = api.items[0]
data = dataset.fetch({"station": u"Växjö A", "period": "latest-months"})
self.assertTrue(len(data) > 0)
def test_get_stations_list(self):
scraper = SMHI()
api = scraper.get("Meteorological Observations")
dataset = api.items[0]
stations = dataset.get_stations_list()
self.assertTrue(len(stations) > 0)
for station in stations:
self.assertTrue("longitude" in station)
active_stations = dataset.get_active_stations_list()
self.assertTrue(len(active_stations) > 0)
self.assertTrue(len(stations) > len(active_stations))
def test_iterate_queries(self):
# Make same query to multiple datasets
scraper = SMHI()
api = scraper.get("Meteorological Observations")
datasets = [
u"Nederbördsmängd, summa, 1 gång per månad",
u"Lufttemperatur, medel, 1 gång per månad",
]
dfs = []
for dataset_name in datasets:
query = {
"period": ["corrected-archive"],
"station": "Abisko"
}
res = api.get(dataset_name).fetch(query)
dfs.append(res.pandas)
# Merge the two resultsets to one dataframe
df = pd.concat(dfs)
# Make sure that both parameters (datasets) are in
# the final dataframe
parameters = df["parameter"].unique()
self.assertTrue(len(parameters) == 2)
for parameter in parameters:
self.assertTrue(parameter in datasets)
| mit |
TuSimple/mxnet | example/multivariate_time_series/src/lstnet.py | 17 | 11583 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
#Todo: Ensure skip connection implementation is correct
import os
import math
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import logging
import metrics
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for multivariate time series forecasting",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--q', type=int, default=24*7,
help='number of historical measurements included in each training example')
parser.add_argument('--horizon', type=int, default=3,
help='number of measurements ahead to predict')
parser.add_argument('--splits', type=str, default="0.6,0.2",
help='fraction of data to use for train & validation. remainder used for test.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--filter-list', type=str, default="6,12,18",
help='unique filter sizes')
parser.add_argument('--num-filters', type=int, default=100,
help='number of each filter size')
parser.add_argument('--recurrent-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--seasonal-period', type=int, default=24,
help='time between seasonal measurements')
parser.add_argument('--time-interval', type=int, default=1,
help='time between each measurement')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
"""
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
"""
# Read in data as numpy array
df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None)
feature_df = df.iloc[:, :].astype(float)
x = feature_df.as_matrix()
x = x[:max_records] if max_records else x
# Construct training examples based on horizon and window
x_ts = np.zeros((x.shape[0] - q, q, x.shape[1]))
y_ts = np.zeros((x.shape[0] - q, x.shape[1]))
for n in range(x.shape[0]):
if n + 1 < q:
continue
elif n + 1 + horizon > x.shape[0]:
continue
else:
y_n = x[n + horizon, :]
x_n = x[n + 1 - q:n + 1, :]
x_ts[n-q] = x_n
y_ts[n-q] = y_n
# Split into training and testing data
training_examples = int(x_ts.shape[0] * splits[0])
valid_examples = int(x_ts.shape[0] * splits[1])
x_train, y_train = x_ts[:training_examples], \
y_ts[:training_examples]
x_valid, y_valid = x_ts[training_examples:training_examples + valid_examples], \
y_ts[training_examples:training_examples + valid_examples]
x_test, y_test = x_ts[training_examples + valid_examples:], \
y_ts[training_examples + valid_examples:]
#build iterators to feed batches to network
train_iter = mx.io.NDArrayIter(data=x_train,
label=y_train,
batch_size=batch_size)
val_iter = mx.io.NDArrayIter(data=x_valid,
label=y_valid,
batch_size=batch_size)
test_iter = mx.io.NDArrayIter(data=x_test,
label=y_test,
batch_size=batch_size)
return train_iter, val_iter, test_iter
def sym_gen(train_iter, q, filter_list, num_filter, dropout, rcells, skiprcells, seasonal_period, time_interval):
input_feature_shape = train_iter.provide_data[0][1]
X = mx.symbol.Variable(train_iter.provide_data[0].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
# reshape data before applying convolutional layer (takes 4D shape in case you ever work with images)
conv_input = mx.sym.reshape(data=X, shape=(0, 1, q, -1))
###############
# CNN Component
###############
outputs = []
for i, filter_size in enumerate(filter_list):
# pad input array to ensure number output rows = number input rows after applying kernel
padi = mx.sym.pad(data=conv_input, mode="constant", constant_value=0,
pad_width=(0, 0, 0, 0, filter_size - 1, 0, 0, 0))
convi = mx.sym.Convolution(data=padi, kernel=(filter_size, input_feature_shape[2]), num_filter=num_filter)
acti = mx.sym.Activation(data=convi, act_type='relu')
trans = mx.sym.reshape(mx.sym.transpose(data=acti, axes=(0, 2, 1, 3)), shape=(0, 0, 0))
outputs.append(trans)
cnn_features = mx.sym.Concat(*outputs, dim=2)
cnn_reg_features = mx.sym.Dropout(cnn_features, p=dropout)
###############
# RNN Component
###############
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(rcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
rnn_features = outputs[-1] #only take value from final unrolled cell for use later
####################
# Skip-RNN Component
####################
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(skiprcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
# Take output from cells p steps apart
p = int(seasonal_period / time_interval)
output_indices = list(range(0, q, p))
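# p is the number of time steps per seasonal period; after reversing the
# unrolled outputs (below) so index 0 is the most recent step, sampling
# every p steps gives the skip connections that capture the seasonal pattern.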
outputs.reverse()
skip_outputs = [outputs[i] for i in output_indices]
skip_rnn_features = mx.sym.concat(*skip_outputs, dim=1)
##########################
# Autoregressive Component
##########################
auto_list = []
for i in list(range(input_feature_shape[2])):
time_series = mx.sym.slice_axis(data=X, axis=2, begin=i, end=i+1)
fc_ts = mx.sym.FullyConnected(data=time_series, num_hidden=1)
auto_list.append(fc_ts)
ar_output = mx.sym.concat(*auto_list, dim=1)
######################
# Prediction Component
######################
neural_components = mx.sym.concat(*[rnn_features, skip_rnn_features], dim=1)
neural_output = mx.sym.FullyConnected(data=neural_components, num_hidden=input_feature_shape[2])
model_output = neural_output + ar_output
loss_grad = mx.sym.LinearRegressionOutput(data=model_output, label=Y)
return loss_grad, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
def train(symbol, train_iter, valid_iter, data_names, label_names):
devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
module = mx.mod.Module(symbol, data_names=data_names, label_names=label_names, context=devs)
module.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
module.init_params(mx.initializer.Uniform(0.1))
module.init_optimizer(optimizer=args.optimizer, optimizer_params={'learning_rate': args.lr})
for epoch in range(1, args.num_epochs+1):
train_iter.reset()
val_iter.reset()
for batch in train_iter:
module.forward(batch, is_train=True) # compute predictions
module.backward() # compute gradients
module.update() # update parameters
train_pred = module.predict(train_iter).asnumpy()
train_label = train_iter.label[0][1].asnumpy()
print('\nMetrics: Epoch %d, Training %s' % (epoch, metrics.evaluate(train_pred, train_label)))
val_pred = module.predict(val_iter).asnumpy()
val_label = val_iter.label[0][1].asnumpy()
print('Metrics: Epoch %d, Validation %s' % (epoch, metrics.evaluate(val_pred, val_label)))
if epoch % args.save_period == 0 and epoch > 1:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if epoch == args.num_epochs:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.splits = list(map(float, args.splits.split(',')))
args.filter_list = list(map(int, args.filter_list.split(',')))
# Check valid args
if not max(args.filter_list) <= args.q:
raise AssertionError("no filter can be larger than q")
if not args.q >= math.ceil(args.seasonal_period / args.time_interval):
raise AssertionError("size of skip connections cannot exceed q")
# Build data iterators
train_iter, val_iter, test_iter = build_iters(args.data_dir, args.max_records, args.q, args.horizon, args.splits, args.batch_size)
# Choose cells for recurrent layers: each cell will take the output of the previous cell in the list
rcells = [mx.rnn.GRUCell(num_hidden=args.recurrent_state_size)]
skiprcells = [mx.rnn.LSTMCell(num_hidden=args.recurrent_state_size)]
# Define network symbol
symbol, data_names, label_names = sym_gen(train_iter, args.q, args.filter_list, args.num_filters,
args.dropout, rcells, skiprcells, args.seasonal_period, args.time_interval)
# train cnn model
train(symbol, train_iter, val_iter, data_names, label_names)
| apache-2.0 |
kmather73/zipline | tests/test_versioning.py | 30 | 3539 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas
import pickle
from nose_parameterized import parameterized
from unittest import TestCase
from zipline.finance.blotter import Order
from .serialization_cases import (
object_serialization_cases,
assert_dict_equal
)
base_state_dir = 'tests/resources/saved_state_archive'
BASE_STATE_DIR = os.path.join(
os.path.dirname(__file__),
'resources',
'saved_state_archive')
class VersioningTestCase(TestCase):
def load_state_from_disk(self, cls):
state_dir = cls.__module__ + '.' + cls.__name__
full_dir = BASE_STATE_DIR + '/' + state_dir
state_files = \
[f for f in os.listdir(full_dir) if 'State_Version_' in f]
for f_name in state_files:
f = open(full_dir + '/' + f_name, 'r')
yield pickle.load(f)
# Only test versioning in minutely mode right now
@parameterized.expand(object_serialization_cases(skip_daily=True))
def test_object_serialization(self,
_,
cls,
initargs,
di_vars,
comparison_method='dict'):
# The state generated under one version of pandas may not be
# compatible with another. To ensure that tests pass under the travis
# pandas version matrix, we only run versioning tests under the
# current version of pandas. This will need to be updated once we
# change the pandas version on prod.
if pandas.__version__ != '0.12.0':
return
# Make reference object
obj = cls(*initargs)
for k, v in di_vars.items():
setattr(obj, k, v)
# Fetch state
state_versions = self.load_state_from_disk(cls)
for version in state_versions:
# For each version inflate a new object and ensure that it
# matches the original.
newargs = version['newargs']
initargs = version['initargs']
state = version['obj_state']
if newargs is not None:
obj2 = cls.__new__(cls, *newargs)
else:
obj2 = cls.__new__(cls)
if initargs is not None:
obj2.__init__(*initargs)
obj2.__setstate__(state)
for k, v in di_vars.items():
setattr(obj2, k, v)
# The ObjectId generated on instantiation of Order will
# not be the same as the one loaded from saved state.
if cls == Order:
obj.__dict__['id'] = obj2.__dict__['id']
if comparison_method == 'repr':
self.assertEqual(obj.__repr__(), obj2.__repr__())
elif comparison_method == 'to_dict':
assert_dict_equal(obj.to_dict(), obj2.to_dict())
else:
assert_dict_equal(obj.__dict__, obj2.__dict__)
| apache-2.0 |
krischer/pyadjoint | src/pyadjoint/utils.py | 1 | 9017 | #!/usr/bin/env python
# -*- encoding: utf8 -*-
"""
Utility functions for Pyadjoint.
:copyright:
Lion Krischer (krischer@geophysik.uni-muenchen.de), 2015
:license:
BSD 3-Clause ("BSD New" or "BSD Simplified")
"""
from __future__ import absolute_import, division, print_function
import inspect
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import obspy
EXAMPLE_DATA_PDIFF = (800, 900)
EXAMPLE_DATA_SDIFF = (1500, 1600)
def taper_window(trace, left_border_in_seconds, right_border_in_seconds,
taper_percentage, taper_type, **kwargs):
"""
Helper function to taper a window within a data trace.
This function modifies the passed trace object in-place.
:param trace: The trace to be tapered.
:type trace: :class:`obspy.core.trace.Trace`
:param left_border_in_seconds: The left window border in seconds since
the first sample.
:type left_border_in_seconds: float
:param right_border_in_seconds: The right window border in seconds since
the first sample.
:type right_border_in_seconds: float
:param taper_percentage: Decimal percentage of taper at one end (ranging
from ``0.0`` (0%) to ``0.5`` (50%)).
:type taper_percentage: float
:param taper_type: The taper type, supports anything
:meth:`obspy.core.trace.Trace.taper` can use.
:type taper_type: str
Any additional keyword arguments are passed to the
:meth:`obspy.core.trace.Trace.taper` method.
.. rubric:: Example
>>> import obspy
>>> tr = obspy.read()[0]
>>> tr.plot()
.. plot::
import obspy
tr = obspy.read()[0]
tr.plot()
>>> from pyadjoint.utils import taper_window
>>> taper_window(tr, 4, 11, taper_percentage=0.10, taper_type="hann")
>>> tr.plot()
.. plot::
import obspy
from pyadjoint.utils import taper_window
tr = obspy.read()[0]
taper_window(tr, 4, 11, taper_percentage=0.10, taper_type="hann")
tr.plot()
"""
s, e = trace.stats.starttime, trace.stats.endtime
trace.trim(s + left_border_in_seconds, s + right_border_in_seconds)
trace.taper(max_percentage=taper_percentage, type=taper_type, **kwargs)
trace.trim(s, e, pad=True, fill_value=0.0)
# Enable method chaining.
return trace
def window_taper(signal, taper_percentage, taper_type):
"""
window taper function.
:param signal: time series
:type signal: ndarray(float)
:param taper_percentage: total percentage of taper in decimal
:type taper_percentage: float
:param taper_type: taper type, one of 'cos', 'cos_p10', 'hann', 'hamming'
:type taper_type: str
:return: the tapered input ndarray
TODO:
add support for more taper types
"""
taper_collection = ('cos', 'cos_p10', 'hann', "hamming")
if taper_type not in taper_collection:
raise ValueError("Window taper not supported")
if taper_percentage < 0 or taper_percentage > 1:
raise ValueError("Wrong taper percentage")
npts = len(signal)
if taper_percentage == 0.0 or taper_percentage == 1.0:
frac = int(npts*taper_percentage / 2.0)
else:
frac = int(npts*taper_percentage / 2.0 + 0.5)
idx1 = frac
idx2 = npts - frac
if taper_type == 'hann':
signal[:idx1] *=\
(0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(0, frac) /
(2 * frac - 1)))
signal[idx2:] *=\
(0.5 - 0.5 * np.cos(2.0 * np.pi * np.arange(frac, 2 * frac) /
(2 * frac - 1)))
if taper_type == 'hamming':
signal[:idx1] *=\
(0.54 - 0.46 * np.cos(2.0 * np.pi * np.arange(0, frac) /
(2 * frac - 1)))
signal[idx2:] *=\
(0.54 - 0.46 * np.cos(2.0 * np.pi * np.arange(frac, 2 * frac) /
(2 * frac - 1)))
if taper_type == 'cos':
power = 1.
signal[:idx1] *= np.cos(np.pi * np.arange(0, frac) /
(2 * frac - 1) - np.pi / 2.0) ** power
signal[idx2:] *= np.cos(np.pi * np.arange(frac, 2 * frac) /
(2 * frac - 1) - np.pi / 2.0) ** power
if taper_type == 'cos_p10':
power = 10.
signal[:idx1] *= 1. - np.cos(np.pi * np.arange(0, frac) /
(2 * frac - 1)) ** power
signal[idx2:] *= 1. - np.cos(np.pi * np.arange(frac, 2 * frac) /
(2 * frac - 1)) ** power
return signal
def get_example_data():
"""
Helper function returning example data for Pyadjoint.
The returned data is fully preprocessed and ready to be used with Pyflex.
:returns: Tuple of observed and synthetic streams
:rtype: tuple of :class:`obspy.core.stream.Stream` objects
.. rubric:: Example
>>> from pyadjoint.utils import get_example_data
>>> observed, synthetic = get_example_data()
>>> print(observed) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
3 Trace(s) in Stream:
SY.DBO.S3.MXR | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
SY.DBO.S3.MXT | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
SY.DBO.S3.MXZ | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
>>> print(synthetic) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
3 Trace(s) in Stream:
SY.DBO..LXR | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
SY.DBO..LXT | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
SY.DBO..LXZ | 2014-11-15T02:31:50.259999Z - ... | 1.0 Hz, 3600 samples
"""
path = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())),
"example_data")
observed = obspy.read(os.path.join(path, "observed_processed.mseed"))
observed.sort()
synthetic = obspy.read(os.path.join(path, "synthetic_processed.mseed"))
synthetic.sort()
return observed, synthetic
def generic_adjoint_source_plot(observed, synthetic, adjoint_source, misfit,
window, adjoint_source_name):
"""
Generic plotting function for adjoint sources and data.
Many types of adjoint sources can be represented in the same manner.
This is a convenience function that can be called by different
the implementations for different adjoint sources.
:param observed: The observed data.
:type observed: :class:`obspy.core.trace.Trace`
:param synthetic: The synthetic data.
:type synthetic: :class:`obspy.core.trace.Trace`
:param adjoint_source: The adjoint source.
:type adjoint_source: `numpy.ndarray`
:param misfit: The associated misfit value.
:type misfit: float
:param window: list of (left, right) window borders in seconds since the
first sample in the data arrays.
:type window: list
:param adjoint_source_name: The name of the adjoint source.
:type adjoint_source_name: str
"""
x_range = observed.stats.endtime - observed.stats.starttime
left_window_border = 60000.
right_window_border = 0.
for wins in window:
left_window_border = min(left_window_border, wins[0])
right_window_border = max(right_window_border, wins[1])
buf = (right_window_border - left_window_border) * 0.3
left_window_border -= buf
right_window_border += buf
left_window_border = max(0, left_window_border)
right_window_border = min(x_range, right_window_border)
plt.subplot(211)
plt.plot(observed.times(), observed.data, color="0.2", label="Observed",
lw=2)
plt.plot(synthetic.times(), synthetic.data, color="#bb474f",
label="Synthetic", lw=2)
for wins in window:
re = patches.Rectangle((wins[0], plt.ylim()[0]),
wins[1] - wins[0],
plt.ylim()[1] - plt.ylim()[0],
color="blue", alpha=0.1)
plt.gca().add_patch(re)
plt.grid()
plt.legend(fancybox=True, framealpha=0.5)
plt.xlim(left_window_border, right_window_border)
ylim = max(map(abs, plt.ylim()))
plt.ylim(-ylim, ylim)
plt.subplot(212)
plt.plot(observed.times(), adjoint_source[::-1], color="#2f8d5b", lw=2,
label="Adjoint Source")
plt.grid()
plt.legend(fancybox=True, framealpha=0.5)
# No time reversal for comparison with data
# plt.xlim(x_range - right_window_border, x_range - left_window_border)
# plt.xlabel("Time in seconds since first sample")
plt.xlim(left_window_border, right_window_border)
plt.xlabel("Time (seconds)")
ylim = max(map(abs, plt.ylim()))
plt.ylim(-ylim, ylim)
plt.suptitle("%s Adjoint Source with a Misfit of %.3g" % (
adjoint_source_name, misfit))
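# Hypothetical usage sketch (traces, window and misfit below are fabricated for
# illustration; in practice they come out of a Pyadjoint misfit calculation):
#     import numpy as np
#     import obspy
#     obs = obspy.Trace(data=np.random.randn(3600), header={"delta": 1.0})
#     syn = obspy.Trace(data=np.random.randn(3600), header={"delta": 1.0})
#     adj = np.random.randn(3600)
#     generic_adjoint_source_plot(obs, syn, adj, misfit=0.42,
#                                 window=[(800.0, 900.0)],
#                                 adjoint_source_name="Waveform")
#     plt.show()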
| bsd-3-clause |
stefco/geco_data | geco_diagnostic_plot_pages.py | 1 | 18782 | #!/usr/bin/env python
# (c) Stefan Countryman 2017
DESC="""Plot a list of timing diagnostic channels, which will be read from
stdin as a newline-delimited channel list, for a time window around a given
GPS time. This script can also generate a summary webpage for easy viewing of
plot results. Use this script when there is a timing error to make plots on a
bunch of timing channels to quickly find the source of the problem."""
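# Example invocation (the GPS time and output directory are placeholders only):
#   ./geco_diagnostic_plot_pages.py -t 1186741861 -o ./diagnostic-plots -d 30 -v
# A channel list can be piped in on stdin or passed via -l; otherwise the
# comprehensive DEFAULT_CHANNELS list defined below is used.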
DT = 30
MAX_SIMULTANEOUS_CHANS = 5
# channels to use if there are none specified via CLI
DEFAULT_CHANNELS=[
"L1:SYS-TIMING_C_GPS_A_ERROR_FLAG",
"L1:SYS-TIMING_C_GPS_A_ERROR_FLAG",
"L1:SYS-TIMING_C_GPS_A_LATITUDE",
"L1:SYS-TIMING_C_GPS_A_LONGITUDE",
"L1:SYS-TIMING_C_GPS_A_ALTITUDE",
"L1:SYS-TIMING_C_GPS_A_SURVEYPROGRESS",
"L1:SYS-TIMING_C_GPS_A_HOLDOVERDURATION",
"L1:SYS-TIMING_C_GPS_A_DACVOLTAGE",
"L1:SYS-TIMING_C_GPS_A_TEMPERATURE",
"L1:SYS-TIMING_C_GPS_A_RECEIVERMODE",
"L1:SYS-TIMING_C_GPS_A_DECODINGSTATUS",
"L1:SYS-TIMING_C_GPS_A_TIMEVALID",
"L1:SYS-TIMING_C_GPS_A_UTCOFFSET",
"L1:SYS-TIMING_C_GPS_A_TIMESOURCE",
"L1:SYS-TIMING_C_GPS_A_PPSSOURCE",
"L1:SYS-TIMING_C_GPS_A_ALMANACINCOMPLETE",
"L1:SYS-TIMING_C_GPS_A_NOTTRACKINGSATTELITES",
"L1:SYS-TIMING_C_GPS_A_SURVEYINPROGRESS",
"L1:SYS-TIMING_C_GPS_A_GPS",
"L1:SYS-TIMING_C_GPS_A_YEAR",
"L1:SYS-TIMING_C_GPS_A_MONTH",
"L1:SYS-TIMING_C_GPS_A_DAY",
"L1:SYS-TIMING_C_GPS_A_HOUR",
"L1:SYS-TIMING_C_GPS_A_MINUTE",
"L1:SYS-TIMING_C_GPS_A_SECOND",
"L1:SYS-TIMING_C_GPS_A_LEAP",
"L1:SYS-TIMING_C_GPS_A_LEAPSECONDPENDING",
"L1:SYS-TIMING_C_GPS_A_WEEK",
"L1:SYS-TIMING_C_GPS_A_TOW",
"L1:SYS-TIMING_C_GPS_A_PPSOFFSET",
"L1:SYS-TIMING_C_GPS_A_DISCIPLININGMODE",
"L1:SYS-TIMING_C_GPS_A_DISCIPLININGACTIVITY",
"L1:SYS-TIMING_C_GPS_A_DACATRAIL",
"L1:SYS-TIMING_C_GPS_A_DACNEARRAIL",
"L1:SYS-TIMING_C_GPS_A_NOSTOREDPOSITION",
"L1:SYS-TIMING_C_GPS_A_POSITIONQUESTIONABLE",
"L1:SYS-TIMING_C_GPS_A_NOPPS",
"L1:SYS-TIMING_C_GPS_A_ANTENNAOPEN",
"L1:SYS-TIMING_C_GPS_A_ANTENNASHORTED",
"L1:SYS-TIMING_C_GPS_A_ERROR_FLAG",
"L1:SYS-TIMING_C_GPS_A_ERROR_CODE",
"L1:SYS-TIMING_X_GPS_A_ERROR_FLAG",
"L1:SYS-TIMING_X_GPS_A_LATITUDE",
"L1:SYS-TIMING_X_GPS_A_LONGITUDE",
"L1:SYS-TIMING_X_GPS_A_ALTITUDE",
"L1:SYS-TIMING_X_GPS_A_SPEED3D",
"L1:SYS-TIMING_X_GPS_A_SPEED2D",
"L1:SYS-TIMING_X_GPS_A_HEADING",
"L1:SYS-TIMING_X_GPS_A_DOP",
"L1:SYS-TIMING_X_GPS_A_VISSATELLITES",
"L1:SYS-TIMING_X_GPS_A_TRACKSATELLITES",
"L1:SYS-TIMING_X_GPS_A_TIMEVALID",
"L1:SYS-TIMING_X_GPS_A_RECEIVERMODE",
"L1:SYS-TIMING_X_GPS_A_UTCOFFSET",
"L1:SYS-TIMING_X_GPS_A_TIMESOURCE",
"L1:SYS-TIMING_X_GPS_A_ALMANACINCOMPLETE",
"L1:SYS-TIMING_X_GPS_A_GPS",
"L1:SYS-TIMING_X_GPS_A_YEAR",
"L1:SYS-TIMING_X_GPS_A_MONTH",
"L1:SYS-TIMING_X_GPS_A_DAY",
"L1:SYS-TIMING_X_GPS_A_HOUR",
"L1:SYS-TIMING_X_GPS_A_MINUTE",
"L1:SYS-TIMING_X_GPS_A_SECOND",
"L1:SYS-TIMING_X_GPS_A_LEAP",
"L1:SYS-TIMING_X_GPS_A_WEEK",
"L1:SYS-TIMING_X_GPS_A_TOW",
"L1:SYS-TIMING_X_GPS_A_NOTTRACKINGSATTELITES",
"L1:SYS-TIMING_X_GPS_A_SURVEYINPROGRESS",
"L1:SYS-TIMING_X_GPS_A_ANTENNAOPEN",
"L1:SYS-TIMING_X_GPS_A_ANTENNASHORTED",
"L1:SYS-TIMING_X_GPS_A_NARROWBAND",
"L1:SYS-TIMING_X_GPS_A_FASTACQUISITION",
"L1:SYS-TIMING_X_GPS_A_FILTERRESET",
"L1:SYS-TIMING_X_GPS_A_POSITIONLOCK",
"L1:SYS-TIMING_X_GPS_A_DIFFERENTIALFIX",
"L1:SYS-TIMING_Y_GPS_A_ERROR_FLAG",
"L1:SYS-TIMING_Y_GPS_A_LATITUDE",
"L1:SYS-TIMING_Y_GPS_A_LONGITUDE",
"L1:SYS-TIMING_Y_GPS_A_ALTITUDE",
"L1:SYS-TIMING_Y_GPS_A_SPEED3D",
"L1:SYS-TIMING_Y_GPS_A_SPEED2D",
"L1:SYS-TIMING_Y_GPS_A_HEADING",
"L1:SYS-TIMING_Y_GPS_A_DOP",
"L1:SYS-TIMING_Y_GPS_A_VISSATELLITES",
"L1:SYS-TIMING_Y_GPS_A_TRACKSATELLITES",
"L1:SYS-TIMING_Y_GPS_A_TIMEVALID",
"L1:SYS-TIMING_Y_GPS_A_RECEIVERMODE",
"L1:SYS-TIMING_Y_GPS_A_UTCOFFSET",
"L1:SYS-TIMING_Y_GPS_A_TIMESOURCE",
"L1:SYS-TIMING_Y_GPS_A_ALMANACINCOMPLETE",
"L1:SYS-TIMING_Y_GPS_A_GPS",
"L1:SYS-TIMING_Y_GPS_A_YEAR",
"L1:SYS-TIMING_Y_GPS_A_MONTH",
"L1:SYS-TIMING_Y_GPS_A_DAY",
"L1:SYS-TIMING_Y_GPS_A_HOUR",
"L1:SYS-TIMING_Y_GPS_A_MINUTE",
"L1:SYS-TIMING_Y_GPS_A_SECOND",
"L1:SYS-TIMING_Y_GPS_A_LEAP",
"L1:SYS-TIMING_Y_GPS_A_WEEK",
"L1:SYS-TIMING_Y_GPS_A_TOW",
"L1:SYS-TIMING_Y_GPS_A_NOTTRACKINGSATTELITES",
"L1:SYS-TIMING_Y_GPS_A_SURVEYINPROGRESS",
"L1:SYS-TIMING_Y_GPS_A_ANTENNAOPEN",
"L1:SYS-TIMING_Y_GPS_A_ANTENNASHORTED",
"L1:SYS-TIMING_Y_GPS_A_NARROWBAND",
"L1:SYS-TIMING_Y_GPS_A_FASTACQUISITION",
"L1:SYS-TIMING_Y_GPS_A_FILTERRESET",
"L1:SYS-TIMING_Y_GPS_A_POSITIONLOCK",
"L1:SYS-TIMING_Y_GPS_A_DIFFERENTIALFIX",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_0_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_1_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_2_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_3_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_4_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_5_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_6_DIFF",
"L1:SYS-TIMING_C_PPS_B_SIGNAL_7_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_0_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_1_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_2_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_3_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_4_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_5_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_6_DIFF",
"L1:SYS-TIMING_X_PPS_A_SIGNAL_7_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_0_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_1_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_2_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_3_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_4_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_5_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_6_DIFF",
"L1:SYS-TIMING_Y_PPS_A_SIGNAL_7_DIFF",
"H1:SYS-TIMING_C_GPS_A_ERROR_FLAG",
"H1:SYS-TIMING_C_GPS_A_ERROR_FLAG",
"H1:SYS-TIMING_C_GPS_A_LATITUDE",
"H1:SYS-TIMING_C_GPS_A_LONGITUDE",
"H1:SYS-TIMING_C_GPS_A_ALTITUDE",
"H1:SYS-TIMING_C_GPS_A_SURVEYPROGRESS",
"H1:SYS-TIMING_C_GPS_A_HOLDOVERDURATION",
"H1:SYS-TIMING_C_GPS_A_DACVOLTAGE",
"H1:SYS-TIMING_C_GPS_A_TEMPERATURE",
"H1:SYS-TIMING_C_GPS_A_RECEIVERMODE",
"H1:SYS-TIMING_C_GPS_A_DECODINGSTATUS",
"H1:SYS-TIMING_C_GPS_A_TIMEVALID",
"H1:SYS-TIMING_C_GPS_A_UTCOFFSET",
"H1:SYS-TIMING_C_GPS_A_TIMESOURCE",
"H1:SYS-TIMING_C_GPS_A_PPSSOURCE",
"H1:SYS-TIMING_C_GPS_A_ALMANACINCOMPLETE",
"H1:SYS-TIMING_C_GPS_A_NOTTRACKINGSATTELITES",
"H1:SYS-TIMING_C_GPS_A_SURVEYINPROGRESS",
"H1:SYS-TIMING_C_GPS_A_GPS",
"H1:SYS-TIMING_C_GPS_A_YEAR",
"H1:SYS-TIMING_C_GPS_A_MONTH",
"H1:SYS-TIMING_C_GPS_A_DAY",
"H1:SYS-TIMING_C_GPS_A_HOUR",
"H1:SYS-TIMING_C_GPS_A_MINUTE",
"H1:SYS-TIMING_C_GPS_A_SECOND",
"H1:SYS-TIMING_C_GPS_A_LEAP",
"H1:SYS-TIMING_C_GPS_A_LEAPSECONDPENDING",
"H1:SYS-TIMING_C_GPS_A_WEEK",
"H1:SYS-TIMING_C_GPS_A_TOW",
"H1:SYS-TIMING_C_GPS_A_PPSOFFSET",
"H1:SYS-TIMING_C_GPS_A_DISCIPLININGMODE",
"H1:SYS-TIMING_C_GPS_A_DISCIPLININGACTIVITY",
"H1:SYS-TIMING_C_GPS_A_DACATRAIL",
"H1:SYS-TIMING_C_GPS_A_DACNEARRAIL",
"H1:SYS-TIMING_C_GPS_A_NOSTOREDPOSITION",
"H1:SYS-TIMING_C_GPS_A_POSITIONQUESTIONABLE",
"H1:SYS-TIMING_C_GPS_A_NOPPS",
"H1:SYS-TIMING_C_GPS_A_ANTENNAOPEN",
"H1:SYS-TIMING_C_GPS_A_ANTENNASHORTED",
"H1:SYS-TIMING_C_GPS_A_ERROR_FLAG",
"H1:SYS-TIMING_C_GPS_A_ERROR_CODE",
"H1:SYS-TIMING_X_GPS_A_ERROR_FLAG",
"H1:SYS-TIMING_X_GPS_A_LATITUDE",
"H1:SYS-TIMING_X_GPS_A_LONGITUDE",
"H1:SYS-TIMING_X_GPS_A_ALTITUDE",
"H1:SYS-TIMING_X_GPS_A_SPEED3D",
"H1:SYS-TIMING_X_GPS_A_SPEED2D",
"H1:SYS-TIMING_X_GPS_A_HEADING",
"H1:SYS-TIMING_X_GPS_A_DOP",
"H1:SYS-TIMING_X_GPS_A_VISSATELLITES",
"H1:SYS-TIMING_X_GPS_A_TRACKSATELLITES",
"H1:SYS-TIMING_X_GPS_A_TIMEVALID",
"H1:SYS-TIMING_X_GPS_A_RECEIVERMODE",
"H1:SYS-TIMING_X_GPS_A_UTCOFFSET",
"H1:SYS-TIMING_X_GPS_A_TIMESOURCE",
"H1:SYS-TIMING_X_GPS_A_ALMANACINCOMPLETE",
"H1:SYS-TIMING_X_GPS_A_GPS",
"H1:SYS-TIMING_X_GPS_A_YEAR",
"H1:SYS-TIMING_X_GPS_A_MONTH",
"H1:SYS-TIMING_X_GPS_A_DAY",
"H1:SYS-TIMING_X_GPS_A_HOUR",
"H1:SYS-TIMING_X_GPS_A_MINUTE",
"H1:SYS-TIMING_X_GPS_A_SECOND",
"H1:SYS-TIMING_X_GPS_A_LEAP",
"H1:SYS-TIMING_X_GPS_A_WEEK",
"H1:SYS-TIMING_X_GPS_A_TOW",
"H1:SYS-TIMING_X_GPS_A_NOTTRACKINGSATTELITES",
"H1:SYS-TIMING_X_GPS_A_SURVEYINPROGRESS",
"H1:SYS-TIMING_X_GPS_A_ANTENNAOPEN",
"H1:SYS-TIMING_X_GPS_A_ANTENNASHORTED",
"H1:SYS-TIMING_X_GPS_A_NARROWBAND",
"H1:SYS-TIMING_X_GPS_A_FASTACQUISITION",
"H1:SYS-TIMING_X_GPS_A_FILTERRESET",
"H1:SYS-TIMING_X_GPS_A_POSITIONLOCK",
"H1:SYS-TIMING_X_GPS_A_DIFFERENTIALFIX",
"H1:SYS-TIMING_Y_GPS_A_ERROR_FLAG",
"H1:SYS-TIMING_Y_GPS_A_LATITUDE",
"H1:SYS-TIMING_Y_GPS_A_LONGITUDE",
"H1:SYS-TIMING_Y_GPS_A_ALTITUDE",
"H1:SYS-TIMING_Y_GPS_A_SPEED3D",
"H1:SYS-TIMING_Y_GPS_A_SPEED2D",
"H1:SYS-TIMING_Y_GPS_A_HEADING",
"H1:SYS-TIMING_Y_GPS_A_DOP",
"H1:SYS-TIMING_Y_GPS_A_VISSATELLITES",
"H1:SYS-TIMING_Y_GPS_A_TRACKSATELLITES",
"H1:SYS-TIMING_Y_GPS_A_TIMEVALID",
"H1:SYS-TIMING_Y_GPS_A_RECEIVERMODE",
"H1:SYS-TIMING_Y_GPS_A_UTCOFFSET",
"H1:SYS-TIMING_Y_GPS_A_TIMESOURCE",
"H1:SYS-TIMING_Y_GPS_A_ALMANACINCOMPLETE",
"H1:SYS-TIMING_Y_GPS_A_GPS",
"H1:SYS-TIMING_Y_GPS_A_YEAR",
"H1:SYS-TIMING_Y_GPS_A_MONTH",
"H1:SYS-TIMING_Y_GPS_A_DAY",
"H1:SYS-TIMING_Y_GPS_A_HOUR",
"H1:SYS-TIMING_Y_GPS_A_MINUTE",
"H1:SYS-TIMING_Y_GPS_A_SECOND",
"H1:SYS-TIMING_Y_GPS_A_LEAP",
"H1:SYS-TIMING_Y_GPS_A_WEEK",
"H1:SYS-TIMING_Y_GPS_A_TOW",
"H1:SYS-TIMING_Y_GPS_A_NOTTRACKINGSATTELITES",
"H1:SYS-TIMING_Y_GPS_A_SURVEYINPROGRESS",
"H1:SYS-TIMING_Y_GPS_A_ANTENNAOPEN",
"H1:SYS-TIMING_Y_GPS_A_ANTENNASHORTED",
"H1:SYS-TIMING_Y_GPS_A_NARROWBAND",
"H1:SYS-TIMING_Y_GPS_A_FASTACQUISITION",
"H1:SYS-TIMING_Y_GPS_A_FILTERRESET",
"H1:SYS-TIMING_Y_GPS_A_POSITIONLOCK",
"H1:SYS-TIMING_Y_GPS_A_DIFFERENTIALFIX",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_0_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_1_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_2_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_3_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_4_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_5_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_6_DIFF",
"H1:SYS-TIMING_C_PPS_B_SIGNAL_7_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_0_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_1_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_2_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_3_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_4_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_5_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_6_DIFF",
"H1:SYS-TIMING_X_PPS_A_SIGNAL_7_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_0_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_1_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_2_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_3_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_4_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_5_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_6_DIFF",
"H1:SYS-TIMING_Y_PPS_A_SIGNAL_7_DIFF"
]
CSS="""
img, p {{
max-width: 600px;
}}
div {{
display: inline
}}
"""
# THE REST OF THE IMPORTS ARE AFTER THIS IF STATEMENT.
# Quits immediately on --help or -h flags to skip slow imports when you just
# want to read the help documentation.
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument(
"-t",
"--gpstimes",
type=int,
nargs='*',
help="""
The GPS times about which the plots should be centered.
"""
)
parser.add_argument(
"-o",
"--outdir",
default=".",
help="""
Where should the files be saved? Defaults to current directory
"""
)
parser.add_argument(
"-l",
"--channellist",
help="""
Path to a text file containing the list of channels to plot. If
not provided, this script will try to read the channel list in
from STDIN. If nothing is available from stdin, a default,
comprehensive channel list will be used. The channel list
should be a newline delimited list of valid EPICS channel names,
though badly-formed or invalid channel names will be skipped.
"""
)
parser.add_argument(
"-d",
"--deltat",
type=int,
default=DT,
help="""
The size of the time window to be plotted. The generated plots
will show +/- deltat seconds of data around the central GPS time.
Defaults to: {}
""".format(DT)
)
parser.add_argument(
"-w",
"--skipwebpage",
action='store_true',
help="""
If this flag is provided, no preview webpage will be generated.
By default, the webpage is generated.
"""
)
parser.add_argument(
"-p",
"--skipplots",
action='store_true',
help="""
If this flag is provided, no plots will be generated (useful if
you already made plots and just want to regenerate the webpage).
By default, the plots are generated.
"""
)
parser.add_argument(
"-s",
"--maxsimultaneous",
type=int,
default=MAX_SIMULTANEOUS_CHANS,
help="""
The maximum number of simultaneous channels for which to fetch
data at a given time. It is generally faster to fetch a batch of
channels at a time, but getting too many channels at once might
slow things down. Defaults to: {}
""".format(MAX_SIMULTANEOUS_CHANS)
)
parser.add_argument(
"-v",
"--verbose",
action='store_true',
help="""
Print verbose output for status monitoring while plotting.
Defaults to False.
"""
)
parser.add_argument(
"--print-default-channels",
action='store_true',
help="""
Print the default list of channels as a newline-delimited list
and exit.
"""
)
args = parser.parse_args()
if args.print_default_channels:
print('\n'.join(DEFAULT_CHANNELS))
exit(0)
import matplotlib
matplotlib.use('Agg')
import gwpy.timeseries
import sys
import os
def channel_fname(gps, chan):
return '{}_{}.png'.format(gps, chan.replace(':', '..'))
def image_link_html(gpstimes, chan):
"""return an HTML string for an image element for this plot"""
headerfmt = "<th>{}</th>"
imgfmt = """
<td>
<img src="./{}">
</td>
"""
header = '\n'.join([headerfmt.format(gps) for gps in gpstimes])
images = '\n'.join(
[imgfmt.format(channel_fname(gps, chan)) for gps in gpstimes]
)
channel_entry_fmt = """
<div>
<p>{}</p>
<br>
<table>
<tr>{}</tr>
<tr>{}</tr>
</table>
</div>
"""
return channel_entry_fmt.format(chan, header, images)
def read_channels(filedescriptor):
return list(set(filedescriptor.read().split('\n')) - {''})
def get_channel_list():
"""Get the channel list from the file descriptor specified via the CLI.
If no channel list is provided, try to read from sys.stdin. Otherwise,
use the default comprehensive channel list."""
if args.channellist is None:
# if stdin is a tty, then nothing is being piped in
if sys.stdin.isatty():
return DEFAULT_CHANNELS
else:
return read_channels(sys.stdin)
else:
with open(args.channellist, 'r') as f:
return read_channels(f)
def make_preview_webpage(outdir, chans, gpstimes):
"""Make a webpage called index.html displaying all generated plots and save
it to the specified output directory."""
gpstimes_str = ', '.join(str(gps) for gps in gpstimes)
HTML_TOP = ("""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Plots around {}</title>
<style type="text/css">
""" + CSS.replace('\n', '\n' + 16*' ') + """
</style>
</head>
<body>
""").replace('\n ', '\n').format(gpstimes_str) # replace will remove 1st indent
HTML_BOTTOM = """
</body>
</html>
""".replace('\n ', '\n') # replace will remove 1st indent
filepath = os.path.join(outdir, 'index.html')
with open(filepath, 'w') as f:
f.write(HTML_TOP)
f.write('\n')
for chan in chans:
f.write(image_link_html(gpstimes, chan))
f.write(HTML_BOTTOM)
def make_channel_plots(outdir, chans, gps, dt=DT, verbose=False,
max_simultaneous_chans=MAX_SIMULTANEOUS_CHANS):
for i in range(0, len(chans), max_simultaneous_chans):
if verbose:
print('plotting {} thru {} of {}'.format(i,
i+max_simultaneous_chans,
len(chans)))
chans_sublist = chans[i:i+max_simultaneous_chans]
try:
bufs = gwpy.timeseries.TimeSeriesDict.fetch(chans_sublist, gps-dt,
gps+dt,
verbose=verbose)
except RuntimeError:
if verbose:
print('Could not fetch all at once:\n{}'.format(chans_sublist))
bufs = {}
for chan in chans_sublist:
try:
buf = gwpy.timeseries.TimeSeries.fetch(chan, gps-dt, gps+dt,
verbose=verbose)
bufs[chan] = buf
except RuntimeError:
if verbose: print('Bad channel: {}'.format(chan))
for chan in bufs:
buf = bufs[chan]
filepath = os.path.join(outdir, channel_fname(gps, chan))
plot = buf.plot()
plot.set_title(buf.channel.name.replace('_', r'\_'))
plot.savefig(filepath)
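# The plotting routine can also be driven directly from Python, e.g. (GPS time
# and output directory are placeholders):
#     make_channel_plots('./plots', DEFAULT_CHANNELS[:5], 1186741861,
#                        dt=30, verbose=True)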
if __name__ == '__main__':
chans = get_channel_list()
if args.verbose: print('channels to plot: {}'.format(chans))
if not args.skipwebpage:
make_preview_webpage(args.outdir, chans, args.gpstimes)
if not args.skipplots:
for gps in args.gpstimes:
make_channel_plots(args.outdir, chans, gps, args.deltat,
args.verbose, args.maxsimultaneous)
| mit |
DonBeo/scikit-learn | sklearn/preprocessing/label.py | 2 | 28579 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows one to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.argsort(classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
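# Illustrative sketch of the dense branch above (scores are made up):
#     scores = np.array([[0.1, 0.8, 0.1],
#                        [0.7, 0.2, 0.1]])
#     _inverse_binarize_multiclass(scores, classes=np.array(["a", "b", "c"]))
#     # -> the class with the maximal score per row, i.e. ['b', 'a']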
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
scramblingbalam/Alta_Real | update_mongo.py | 1 | 15367 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 16 12:22:55 2017
@author: scram
"""
from gensim import corpora, models, similarities
import gensim
import json
import os
import pickle
from pymongo import MongoClient
import unicodedata as uniD
import sys
import nltk
import networkx as nx
import numpy as np
import itertools as it
import pprint
import collections as coll
import re
from collections import Counter
import feature_functions as feature
import nested_dict
from functools import reduce
from scipy import spatial
from sklearn.externals import joblib
from Mongo_functions import id2edgelist
pp = pprint.PrettyPrinter(indent=0)
def translatelabel(label):
if label == 'supporting' or label == 'support':
return 0
if label == 'denying' or label == 'deny':
return 1
if label == 'appeal-for-more-information' or label == 'query':
return 2
if label == 'comment' or label == '0':
return 3
if label == 'imp-denying':
return 4
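# Minimal sanity-check sketch for translatelabel (label strings taken from the
# SemEval/RumourEval scheme used throughout this script):
#     assert translatelabel('support') == 0
#     assert translatelabel('deny') == 1
#     assert translatelabel('query') == 2
#     assert translatelabel('comment') == 3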
id_target_dic = {}
label_list =[]
event_target_dic = {}
event_ID_dic = {}
unlabeled_thread_list=[]
thread_list=[]
def files_to_mongo(root_path,label_dict,db):
""" Moves tweets from their file structure to mongo
"""
walk = os.walk(root_path)
threads_errors = []
root_errors = []
replies_errors = []
for current_dir in walk:
if 'structure.json' in current_dir[-1]:
event = current_dir[0].split("\\")[-2]
last_dir = current_dir[0].split("\\")[-1]
if last_dir == "replies":
json_list =[]
root_tweet = {}
rep_path =current_dir[0]
root_path = rep_path.split("\\")
with open("\\".join(root_path[:-1])+"\\"+'structure.json',"r")as jsonfile:
structure = json.load(jsonfile)
source_path = root_path
source_path[-1]="source-tweet"
source_path = "\\".join(source_path)
root =os.listdir(source_path)[0]
with open(source_path+"\\"+root,"r")as jsonfile:
root_tweet = json.load(jsonfile)
for json_path in current_dir[-1]:
with open(current_dir[0]+"\\"+json_path,"r")as jsonfile:
json_list.append(json.load(jsonfile))
thread_list = list(map(lambda x: x.get("id_str"),json_list))
root_id = root_tweet['id']
thread_list.append(str(root_id))
structure = nested_dict.subset_by_key(structure, thread_list)
edge_list = nested_dict.to_edge_list(nested_dict.map_keys(int,structure))
fields = ["parent","child"]
mongo_update = {"_id":root_id,
"id_str":str(root_id),
"event":event,
"edge_list":nested_dict.vecs_to_recs(edge_list,fields)}
try:
db.edge_list.insert_one(mongo_update).inserted_id
except Exception as err:
# print(err)
threads_errors.append(root_id)
for twt in json_list:
twt["_id"] = twt["id"]
twt['label'] = label_dict[int(twt['id'])]
try:
db.replies_to_trump.insert_one(twt).inserted_id
except Exception as err:
# print(err)
replies_errors.append(twt["id"])
root_tweet["_id"] = root_id
root_tweet["label"] = label_dict[int(root_id)]
try:
db.trump_tweets.insert_one(root_tweet)
except Exception as err:
# print(err)
root_errors.append(root_id)
return threads_errors, root_errors, replies_errors
def get_labeled_thread_tweets(thread,db=MongoClient('localhost', 27017).Alta_Real_New):
root_id = thread['_id']
thread_tweets = list(db.trump_tweets.find({'_id':root_id}))
thread_ids = set()
thread_edge_list =[]
# print(root_id)
# print( thread_tweets[0]['text'])
for edg in thread['edge_list']:
parent = edg['parent']
child = edg['child']
thread_edge_list.append((parent,child))
thread_ids.add(child)
thread_ids.add(parent)
thread_ids = list(thread_ids)
thread_tweets += list(db.replies_to_trump.find({'_id':{'$in':thread_ids},'label':{'$exists':True}}))
thread_ids = [i['id'] for i in thread_tweets]
print(root_id in thread_ids)
print(root_id)
print(len(thread_edge_list))
print(thread_ids)
print(len(thread_ids))
# print(thread_ids)
thread_edge_list = list(filter(lambda x: x[0] in thread_ids and x[1] in thread_ids, thread_edge_list))
thread_ids = [root_id] + thread_ids
# print( len(thread_tweets))
return thread_tweets,thread_edge_list,thread_ids
def labels_to_train(db,db_source,labels_dic):
""" checks the Alta_Real database and creates sub-trees for any labeld tweets
"""
threads_errors = []
root_errors = []
replies_errors = []
print(db_source)
roots = list(db_source.trump_tweets.find({"label":{"$exists":True}}))
print("LENGTH ROOTS",len(roots))
# put labels to DB from target label file
for labeled_id in labels_dic:
tweet = list(db_source.replies_to_trump.find({'_id':labeled_id}))
if tweet:
tweet = tweet[0]
if not tweet.get('label',None):
db_source.replies_to_trump.update_one(
{"_id":labeled_id},
{"$set":
{"label":labels_dic[labeled_id]}
}
)
for root_tweet in roots:
root_id = root_tweet['id']
thread_edges = list(db_source.edge_list.find({"_id":root_id}))[0]
json_list,edge_list,thread_list = get_labeled_thread_tweets(thread_edges,db_source)
thread_list.append(str(root_id))
fields = ["parent","child"]
event_text = root_tweet['text']
# event_text = event_text.replace('/','').replace(':','').replace('-','').replace('#','')
if len(event_text) >20:
event = event_text.replace(" ","_")[:20]
else:
event = event_text.replace(" ","_")
mongo_update = {"_id":root_id,
"id_str":str(root_id),
"event":event,
"edge_list":nested_dict.vecs_to_recs(edge_list,fields)}
# print(mongo_update)
try:
db.edge_list.insert_one(mongo_update).inserted_id
except Exception as err:
# print(err)
threads_errors.append(root_id)
for twt in json_list:
twt["_id"] = twt["id"]
if not twt['label']:
print(twt['id'])
print(twt['label'])
print(bool(twt['label']))
try:
db.replies_to_trump.insert_one(twt).inserted_id
except Exception as err:
# print(err)
replies_errors.append(twt["id"])
root_tweet["_id"] = root_id
try:
db.trump_tweets.insert_one(root_tweet)
except Exception as err:
# print(err)
root_errors.append(root_id)
return threads_errors, root_errors, replies_errors
def process_tweet(tweet):
feature_vector =[]
if tweet.get("full_text",None):
text =tweet['full_text']
else:
text =tweet['text']
ID = tweet['id_str']
thread_list.append(event)
if ID in id_target_dic:
label_list.append(id_target_dic[ID])
# print id_target_dic[root_id.split(".")[0]]
attribute_paths = [[u'entities',u'media'],
[u'entities',u'urls'],
[u'in_reply_to_screen_name']]
format_binary_vec = list(feature.attribute_binary_gen(tweet, attribute_paths))
feature_vector += format_binary_vec
punc_vec = list(feature.punc_binary_gen(text,punc_list))
feature_vector += punc_vec
if token_type == "zub":
cap_ratio = feature.zub_capital_ratio(text)
# print "Twit & Zub using same Capitalization Ratio\n\tIs this desirable?"
elif token_type == "twit":
cap_ratio = feature.zub_capital_ratio(text)
# print "Twit & Zub using same Capitalization Ratio\n\tIs this desirable?"
feature_vector += cap_ratio
word_char_count = feature.word_char_count(id_text_dic[ID])
feature_vector += word_char_count
swear_bool = feature.word_bool(text,swear_list)
feature_vector += swear_bool
neg_bool = feature.word_bool(text,negationwords)
feature_vector += neg_bool
if ID in id_pos_dic_full:
pos_vec = id_pos_dic_full[tweet["id"]]
else:
pos_vec = id_pos_dic[tweet["id"]]
feature_vector += feature.pos_vector(pos_vec)
d2v_text = D2V_id_text_dic[ID]
if embed_type == "word2vec":
embed_vector = feature.mean_W2V_vector(d2v_text,D2Vmodel)
elif embed_type == "doc2vec":
embed_vector = D2Vmodel.docvecs[ID]
feature_vector = np.concatenate((embed_vector,
np.array(feature_vector)))
# thread_id_list.append(ID)
# thread_dic[ID] = feature_vector
return int(ID), feature_vector
def get_thread_tweets(thread,db=MongoClient('localhost', 27017).Alta_Real_New):
root_id = thread['_id']
thread_tweets = list(db.trump_tweets.find({'_id':root_id}))
thread_ids = set()
thread_edge_list =[]
for edg in thread['edge_list']:
parent = edg['parent']
child = edg['child']
thread_ids.add(child)
thread_edge_list.append((parent,child))
thread_ids = list(thread_ids)
thread_tweets += list(db.replies_to_trump.find({'_id':{'$in':thread_ids}}))
thread_ids = [root_id] + thread_ids
return thread_tweets,thread_edge_list,thread_ids
#### RUN FUNCTIONS TO CREATE FEATURES
if __name__ == '__main__':
### Import if python 2
if sys.version_info[0] < 3:
import cPickle as pickle
# Global Variables
dims =str(300)
#token_type = "zub"
embed_type = "word2vec"
token_type = "twit"
embed_type = "doc2vec"
id_text_dic = {}
text_list = []
id_list = []
POS_dir ="Data\\twitIE_pos\\"
doc2vec_dir = "Data\\doc2vec\\trump_plus"
ids_around_IDless_tweets=[136,139,1085,1087]
pos_file_path1 = POS_dir+token_type+"_semeval2017"+"_twitIE_POS"
pos_file_path2 = POS_dir+token_type+"_Alta_Real_New"+"_twitIE_POS"
pos_file_path3 = POS_dir+token_type+"_Alta_Real_New"+"_twitIE_POS_FULL_TEXT"
pos_file_path = [pos_file_path1, pos_file_path2,pos_file_path3]
id_pos_dic, index_pos_dic = feature.pos_extract(pos_file_path)
# pos_file_path1 = POS_dir+token_type+"_semeval2017"+"_twitIE_POS_FULL"
pos_file_path_full = POS_dir+token_type+"_Alta_Real_New"+"_twitIE_POS_FULL_TEXT"
pos_file_path_full = [pos_file_path_full]
id_pos_dic_full, index_pos_dic_full = feature.pos_extract(pos_file_path_full)
swear_path = "Data\\badwords.txt"
swear_list=[]
with open(swear_path,"r")as swearfile:
swear_list = swearfile.readlines()
id_text_dic ={}
with open(doc2vec_dir+token_type+"_"+"id_text_dic.json",'r')as textDicFile:
id_text_dic = json.load(textDicFile)
negationwords = ['not', 'no', 'nobody', 'nothing', 'none',
'never', 'neither', 'nor', 'nowhere', 'hardly',
'scarcely', 'barely', 'don*', 'isn*', 'wasn*',
'shouldn*', 'wouldn*', 'couldn*', 'doesn*',
'don', 'isn', 'wasn', 'nothin',
'shouldn', 'wouldn', 'couldn', 'doesn']
punc_list = [u"?",u"!",u"."]
attribute_paths = [[u'entities',u'media'],
[u'entities',u'urls'],
[u'in_reply_to_screen_name']]
doc2vec_dir ="Data/doc2vec/"
doc2vec_dir ="Data/doc2vec/trump_plus"
D2Vmodel = models.Doc2Vec.load(doc2vec_dir+token_type+"_"+'rumorEval_doc2vec_set'+dims+'.model')
print(D2Vmodel.most_similar('black'),"\n")
doc2vec_id = []
with open(doc2vec_dir+"id_list.json","r") as picfile:
doc2vec_id_key={str(twtID):str(key) for key,twtID in enumerate(json.load(picfile))}
D2V_id_text_dic ={}
with open(doc2vec_dir+token_type+"_"+"id_text_dic.json", "r")as d2vtextfile:
D2V_id_text_dic = json.load(d2vtextfile)
err_dic = {}
graph_root_id = ""
graph_event = ""
graph_size = 0
graph_2_vis =[]
thread_dic = {}
event_target_dic = {}
root_id = 0
event_model_dic = {}
#MongoDB credentials and collections
# DBname = 'test-tree'
# DBname = 'test_recurse'
DBname = 'Alta_Real_New'
DBhost = 'localhost'
DBport = 27017
DBname_t = 'Train'
# initiate Mongo Client
client = MongoClient()
client = MongoClient(DBhost, DBport)
DB_trump = client[DBname]
DB_train = client[DBname_t]
# collection where SemEval data is stored
dev = "rumoureval-subtaskA-dev.json"
train = "rumoureval-subtaskA-train.json"
train_dic_dir = "traindev"
train_data_dir ="rumoureval-data"
train_dir = "Data\\semeval2017-task8-dataset"
target_path = "\\".join([train_dir,train_dic_dir,train])
with open(target_path,"r")as targetfile:
id_target_dic = {int(k):v for k,v in json.load(targetfile).items()}
target_path = "\\".join([train_dir,train_dic_dir,dev])
with open(target_path,"r")as targetfile:
id_target_dic.update({int(k):v for k,v in json.load(targetfile).items()})
with open("kats_label_dict","r")as targetfile:
id_target_dic.update({int(k):v for k,v in json.load(targetfile).items()})
if not DB_train.edge_list.find_one():
top_path = "\\".join([train_dir,train_data_dir])
posted = files_to_mongo(top_path,id_target_dic,DB_train)
transfered = labels_to_train(DB_train,DB_trump,id_target_dic)
train_threads, train_roots, train_replies = posted
else:
DB_train.edge_list.find_one()
top_path = "\\".join([train_dir,train_data_dir])
posted = files_to_mongo(top_path,id_target_dic,DB_train)
transfered = labels_to_train(DB_train,DB_trump,id_target_dic)
train_threads, train_roots, train_replies = posted
print("No_New_Threads",set(train_threads)==set(DB_train.edge_list.distinct("_id")))
print("No_New_Roots",set(train_roots)==set(DB_train.trump_tweets.distinct("_id")))
print("No_New_Replies",set(train_replies)==set(DB_train.replies_to_trump.distinct("_id")))
######## Sets 'event' field in main DB to the root_id for that thread
if not DB_trump.edge_list.distinct('event'):
for thrd in list(DB_trump.edge_list.find()):
DB_trump.edge_list.update_one(
{'_id':thrd['_id']},
{'$set':
{'event':thrd['_id'],
'id_str':str(thrd['_id'])}
}
)
big_win_edge = list(DB_train.edge_list.find({'event':'Big_win_in_the_House'}))[0]
print(len(big_win_edge['edge_list'])) | mit |
rubikloud/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
wschenck/nest-simulator | pynest/examples/spatial/connex.py | 20 | 2341 | # -*- coding: utf-8 -*-
#
# connex.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Connect with circular mask, flat probability using 2 populations of iaf_psc_alpha neurons
-------------------------------------------------------------------------------------------
Create two populations on a 30x30 grid of iaf_psc_alpha neurons,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.])
#######################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 0.5}}}
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
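# Note (not part of the original tutorial): with a 30x30 grid spanning an
# extent of 3.0, the grid spacing is 0.1, so the circular mask of radius 0.5
# admits candidate targets up to 5 grid steps from the source, each connected
# with probability 0.5 and a uniformly drawn weight in [0.5, 2.0).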
#################################################################
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('connex.pdf')
| gpl-2.0 |
meduz/scikit-learn | examples/model_selection/plot_roc.py | 102 | 5056 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
schinmayee/object-tracking | src/plot_errors.py | 2 | 3422 | #!/usr/bin/env python
import argparse, os
import numpy as np
import pickle
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Plot errors')
parser.add_argument('--config', dest='config', type=str, default='config.txt',
help='config file containing error directories '
'relative to the config file')
parser.add_argument('--output', dest='output_dir', type=str,
help='Output directory')
args = parser.parse_args()
if not args.output_dir:
print('Output directory name required')
parser.print_help()
exit(1)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
assert(os.path.isdir(args.output_dir))
# read configuration for plot
config_dir = os.path.dirname(os.path.realpath(args.config))
error_names = list()
error_dirs = list()
with open(args.config) as config:
for line in config.readlines():
data = line.split()
if len(data) != 2:
continue
error_names.append(data[0])
error_dirs.append(data[1])
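# Each useful config line is expected to hold two whitespace-separated fields:
# a label and a directory (relative to the config file) containing the pickled
# errors, e.g. (hypothetical values):
#   kalman results/kalman
#   optical_flow results/optical_flow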
N = len(error_names)
# read errors
mean = list()
dev = list()
for dir_name in error_dirs:
file_name = os.path.join(config_dir, dir_name, 'position_error')
assert(os.path.isfile(file_name))
data = open(file_name)
errors = pickle.load(data)
data.close()
#errors = errors[0:100]
mean.append(np.mean(errors))
dev.append(np.std(errors))
#print(mean)
#print(dev)
ind = np.arange(N) # the x locations for the groups
width = 0.6 # the width of the bars
fig, ax = plt.subplots()
rects = ax.bar(ind, mean, width, color='k', yerr=dev, align='center')
# add some text for labels, title and axes ticks
ax.set_ylabel('Mean error in position estimate')
ax.set_ylim([0.8*min(mean),1.2*max(mean)])
ax.set_title('Error in Position')
ax.set_xticks(ind)
ax.set_xticklabels(error_names)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.04f' % height,
ha='center', va='bottom')
autolabel(rects)
plt.savefig(os.path.join(args.output_dir, 'position_error.png'))
'''
Relative position error does not make much sense as computed below.
'''
'''
# read errors
mean = list()
dev = list()
for dir_name in error_dirs:
file_name = os.path.join(config_dir, dir_name, 'pos_rel_error')
assert(os.path.isfile(file_name))
data = open(file_name)
errors = pickle.load(data)
data.close()
#errors = errors[0:100]
mean.append(np.mean(errors))
dev.append(np.std(errors))
#print(mean)
#print(dev)
ind = np.arange(N) # the x locations for the groups
width = 0.6 # the width of the bars
fig, ax = plt.subplots()
rects = ax.bar(ind, mean, width, color='k', yerr=dev, align='center')
# add some text for labels, title and axes ticks
ax.set_ylabel('Mean Relative Position Error')
ax.set_ylim([0.8*min(mean),1.2*max(mean)])
ax.set_title('Relative Position Error')
ax.set_xticks(ind)
ax.set_xticklabels(error_names)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.04f' % height,
ha='center', va='bottom')
autolabel(rects)
plt.savefig(os.path.join(args.output_dir, 'position_rel_error.png'))
'''
| mit |
kevin-intel/scikit-learn | sklearn/cluster/tests/test_k_means.py | 2 | 44252 | """Testing for K-means"""
import re
import sys
import numpy as np
from scipy import sparse as sp
from threadpoolctl import threadpool_limits
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import _astype_copy_false
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics import pairwise_distances
from sklearn.metrics import pairwise_distances_argmin
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means, kmeans_plusplus
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster._kmeans import _labels_inertia
from sklearn.cluster._kmeans import _mini_batch_step
from sklearn.cluster._k_means_common import _relocate_empty_clusters_dense
from sklearn.cluster._k_means_common import _relocate_empty_clusters_sparse
from sklearn.cluster._k_means_common import _euclidean_dense_dense_wrapper
from sklearn.cluster._k_means_common import _euclidean_sparse_dense_wrapper
from sklearn.cluster._k_means_common import _inertia_dense
from sklearn.cluster._k_means_common import _inertia_sparse
from sklearn.datasets import make_blobs
from io import StringIO
# non centered, sparse centers shared by the tests below
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("algo", ["full", "elkan"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_results(array_constr, algo, dtype):
# Checks that KMeans works as intended on toy dataset by comparing with
# expected results computed by hand.
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype)
sample_weight = [3, 1, 1, 3]
init_centers = np.array([[0, 0], [1, 1]], dtype=dtype)
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.375
expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype)
expected_n_iter = 2
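# Hand computation behind the expected values above (weights 3, 1, 1, 3):
# cluster 0 groups [0, 0] and [0.5, 0], so its center is
# (3*0 + 1*0.5) / 4 = 0.125 on x; symmetrically cluster 1 gets 0.875.
# The inertia is the weighted sum of squared distances to the assigned
# centers: 3*0.125**2 + 1*0.375**2 + 1*0.375**2 + 3*0.125**2 = 0.375.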
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X, sample_weight=sample_weight)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=['dense', 'sparse'])
@pytest.mark.parametrize("algo", ['full', 'elkan'])
def test_kmeans_relocated_clusters(array_constr, algo):
# check that empty clusters are relocated as expected
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])
# the second center is too far from the other points and will be empty at the first iteration
init_centers = np.array([[0.5, 0.5], [3, 3]])
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.25
expected_centers = [[0.25, 0], [0.75, 1]]
expected_n_iter = 3
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
def test_relocate_empty_clusters(array_constr):
# test for the _relocate_empty_clusters_(dense/sparse) helpers
# Synthetic dataset with 3 obvious clusters of different sizes
X = np.array(
[-10., -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1)
X = array_constr(X)
sample_weight = np.ones(10)
# centers all initialized to the first point of X
centers_old = np.array([-10., -10, -10]).reshape(-1, 1)
# With this initialization, all points will be assigned to the first center
# At this point a center in centers_new is the weighted sum of the points
# it contains if it's not empty, otherwise it is the same as before.
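# (with unit weights the first entry below, -16.5, is simply the sum of all
# 10 points, since every point is initially assigned to that center)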
centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1)
weight_in_clusters = np.array([10., 0, 0])
labels = np.zeros(10, dtype=np.int32)
if array_constr is np.array:
_relocate_empty_clusters_dense(X, sample_weight, centers_old,
centers_new, weight_in_clusters, labels)
else:
_relocate_empty_clusters_sparse(X.data, X.indices, X.indptr,
sample_weight, centers_old,
centers_new, weight_in_clusters,
labels)
# The relocation scheme will take the 2 points farthest from the center and
# assign them to the 2 empty clusters, i.e. points at 10 and at 9.5. The
# first center will be updated to contain the other 8 points.
assert_array_equal(weight_in_clusters, [8, 1, 1])
assert_allclose(centers_new, [[-36], [10], [9.5]])
@pytest.mark.parametrize("distribution", ["normal", "blobs"])
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0])
def test_kmeans_elkan_results(distribution, array_constr, tol):
# Check that results are identical between lloyd and elkan algorithms
rnd = np.random.RandomState(0)
if distribution == "normal":
X = rnd.normal(size=(5000, 10))
else:
X, _ = make_blobs(random_state=rnd)
X[X < 0] = 0
X = array_constr(X)
km_full = KMeans(algorithm="full", n_clusters=5,
random_state=0, n_init=1, tol=tol)
km_elkan = KMeans(algorithm="elkan", n_clusters=5,
random_state=0, n_init=1, tol=tol)
km_full.fit(X)
km_elkan.fit(X)
assert_allclose(km_elkan.cluster_centers_, km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
assert km_elkan.n_iter_ == km_full.n_iter_
assert km_elkan.inertia_ == pytest.approx(km_full.inertia_, rel=1e-6)
@pytest.mark.parametrize("algorithm", ["full", "elkan"])
def test_kmeans_convergence(algorithm):
# Check that KMeans stops when convergence is reached when tol=0. (#16075)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(5000, 10))
max_iter = 300
km = KMeans(algorithm=algorithm, n_clusters=5, random_state=0,
n_init=1, tol=0, max_iter=max_iter).fit(X)
assert km.n_iter_ < max_iter
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
centers_old = centers + rng.normal(size=centers.shape)
centers_old_csr = centers_old.copy()
centers_new = np.zeros_like(centers_old)
centers_new_csr = np.zeros_like(centers_old_csr)
weight_sums = np.zeros(centers_old.shape[0], dtype=X.dtype)
weight_sums_csr = np.zeros(centers_old.shape[0], dtype=X.dtype)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
sample_weight_mb = sample_weight[:10]
# step 1: compute the dense minibatch update
old_inertia = _mini_batch_step(
X_mb, x_mb_squared_norms, sample_weight_mb, centers_old, centers_new,
weight_sums, np.random.RandomState(0), random_reassign=False)
assert old_inertia > 0.0
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, sample_weight_mb, x_mb_squared_norms, centers_new)
assert new_inertia > 0.0
assert new_inertia < old_inertia
# step 2: compute the sparse minibatch update
old_inertia_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, sample_weight_mb, centers_old_csr,
centers_new_csr, weight_sums_csr, np.random.RandomState(0),
random_reassign=False)
assert old_inertia_csr > 0.0
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, centers_new_csr)
assert new_inertia_csr > 0.0
assert new_inertia_csr < old_inertia_csr
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_allclose(centers_new, centers_new_csr)
assert_allclose(old_inertia, old_inertia_csr)
assert_allclose(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and of distinct labels match
# the expectation
centers = km.cluster_centers_
assert centers.shape == (n_clusters, n_features)
labels = km.labels_
assert np.unique(labels).shape[0] == n_clusters
# check that the label assignment is perfect (up to a permutation)
assert_allclose(v_measure_score(true_labels, labels), 1.0)
assert km.inertia_ > 0.0
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("init", ["random", "k-means++", centers,
lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_all_init(Estimator, data, init):
# Check KMeans and MiniBatchKMeans with all possible init.
n_init = 10 if isinstance(init, str) else 1
km = Estimator(init=init, n_clusters=n_clusters, random_state=42,
n_init=n_init).fit(data)
_check_fitted_model(km)
@pytest.mark.parametrize("init", ["random", "k-means++", centers,
lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"])
def test_minibatch_kmeans_partial_fit_init(init):
# Check MiniBatchKMeans init with partial_fit
n_init = 10 if isinstance(init, str) else 1
km = MiniBatchKMeans(init=init, n_clusters=n_clusters, random_state=0,
n_init=n_init)
for i in range(100):
# "random" init requires many batches to recover the true labels.
km.partial_fit(X)
_check_fitted_model(km)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fortran_aligned_data(Estimator):
# Check that KMeans works with fortran-aligned data.
X_fortran = np.asfortranarray(X)
centers_fortran = np.asfortranarray(centers)
km_c = Estimator(n_clusters=n_clusters, init=centers, n_init=1,
random_state=42).fit(X)
km_f = Estimator(n_clusters=n_clusters, init=centers_fortran, n_init=1,
random_state=42).fit(X_fortran)
assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_)
assert_array_equal(km_c.labels_, km_f.labels_)
@pytest.mark.parametrize('algo', ['full', 'elkan'])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('seed, max_iter, tol', [
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
])
def test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol):
# check that fit.predict gives same result as fit_predict
# There's a very small chance of failure with elkan on unstructured dataset
# because predict method uses fast euclidean distances computation which
# may cause small numerical instabilities.
# NB: This test is largely redundant with respect to test_predict and
# test_predict_equal_labels. This test has the added effect of
# testing idempotence of the fitting procedure, which appears to
# be where it fails on some MacOS setups.
if sys.platform == "darwin":
pytest.xfail(
"Known failures on MacOS, See "
"https://github.com/scikit-learn/scikit-learn/issues/12644")
rng = np.random.RandomState(seed)
X = make_blobs(n_samples=1000, n_features=10, centers=10,
random_state=rng)[0].astype(dtype, copy=False)
X = constructor(X)
kmeans = KMeans(algorithm=algo, n_clusters=10, random_state=seed,
tol=tol, max_iter=max_iter)
labels_1 = kmeans.fit(X).predict(X)
labels_2 = kmeans.fit_predict(X)
# Due to randomness in the order in which chunks of data are processed when
# using more than one thread, the absolute values of the labels can be
# different between the 2 strategies but they should correspond to the same
# clustering.
assert v_measure_score(labels_1, labels_2) == pytest.approx(1, abs=1e-15)
def test_minibatch_kmeans_verbose():
# Check verbose mode of MiniBatchKMeans for better coverage.
km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
km.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize("algorithm", ["full", "elkan"])
@pytest.mark.parametrize("tol", [1e-2, 0])
def test_kmeans_verbose(algorithm, tol, capsys):
# Check verbose mode of KMeans for better coverage.
X = np.random.RandomState(0).normal(size=(5000, 10))
KMeans(algorithm=algorithm, n_clusters=n_clusters, random_state=42,
init="random", n_init=1, tol=tol, verbose=1).fit(X)
captured = capsys.readouterr()
assert re.search(r"Initialization complete", captured.out)
assert re.search(r"Iteration [0-9]+, inertia", captured.out)
if tol == 0:
assert re.search(r"strict convergence", captured.out)
else:
assert re.search(r"center shift .* within tolerance", captured.out)
def test_minibatch_kmeans_warning_init_size():
# Check that a warning is raised when init_size is smaller than n_clusters
with pytest.warns(RuntimeWarning,
match=r"init_size.* should be larger than n_clusters"):
MiniBatchKMeans(init_size=10, n_clusters=20).fit(X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_warning_n_init_precomputed_centers(Estimator):
# Check that a warning is raised when n_init > 1 and an array is passed for
# the init parameter.
with pytest.warns(RuntimeWarning,
match="Explicit initial center position passed: "
"performing only one init"):
Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X)
def test_minibatch_sensible_reassign():
# check that identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
random_state=42)
zeroed_X[::2, :] = 0
km = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random").fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
# do the same with batch-size > X.shape[0] (regression test)
km = MiniBatchKMeans(n_clusters=20, batch_size=200, random_state=42,
init="random").fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
# do the same with partial_fit API
km = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
km.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
def test_minibatch_reassign(data):
# Check the reassignment part of the minibatch step with very high or very
# low reassignment ratio.
perfect_centers = np.empty((n_clusters, n_features))
for i in range(n_clusters):
perfect_centers[i] = X[true_labels == i].mean(axis=0)
x_squared_norms = row_norms(data, squared=True)
sample_weight = np.ones(n_samples)
centers_new = np.empty_like(perfect_centers)
# Give a perfect initialization, but a large reassignment_ratio, as a
# result many centers should be reassigned and the model should no longer
# be good
score_before = - _labels_inertia(data, sample_weight, x_squared_norms,
perfect_centers, 1)[1]
_mini_batch_step(data, x_squared_norms, sample_weight, perfect_centers,
centers_new, np.zeros(n_clusters),
np.random.RandomState(0), random_reassign=True,
reassignment_ratio=1)
score_after = - _labels_inertia(data, sample_weight, x_squared_norms,
centers_new, 1)[1]
assert score_before > score_after
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned.
_mini_batch_step(data, x_squared_norms, sample_weight, perfect_centers,
centers_new, np.zeros(n_clusters),
np.random.RandomState(0), random_reassign=True,
reassignment_ratio=1e-15)
assert_allclose(centers_new, perfect_centers)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size. Run the test with 100 clusters and a batch_size of
# 10 because it turned out that these values ensure that the number of
# clusters to reassign is always bigger than the batch_size.
MiniBatchKMeans(n_clusters=100,
batch_size=10,
init_size=n_samples,
random_state=42,
verbose=True).fit(X)
def test_minibatch_kmeans_init_size():
# Check the internal _init_size attribute of MiniBatchKMeans
# default init size should be 3 * batch_size
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X)
assert km._init_size == 15
# if 3 * batch size < n_clusters, it should then be 3 * n_clusters
km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X)
assert km._init_size == 30
# it should not be larger than n_samples
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1,
init_size=n_samples + 1).fit(X)
assert km._init_size == n_samples
@pytest.mark.parametrize("tol, max_no_improvement", [(1e-4, None), (0, 10)])
def test_minibatch_declared_convergence(capsys, tol, max_no_improvement):
# Check convergence detection based on the exponentially weighted average
# (EWA) of the batch inertia or on a small center change.
X, _, centers = make_blobs(centers=3, random_state=0, return_centers=True)
km = MiniBatchKMeans(n_clusters=3, init=centers, batch_size=20, tol=tol,
random_state=0, max_iter=10, n_init=1, verbose=1,
max_no_improvement=max_no_improvement)
km.fit(X)
assert 1 < km.n_iter_ < 10
captured = capsys.readouterr()
if max_no_improvement is None:
assert "Converged (small centers change)" in captured.out
if tol == 0:
assert "Converged (lack of improvement in inertia)" in captured.out
def test_minibatch_iter_steps():
# Check consistency of n_iter_ and n_steps_ attributes.
batch_size = 30
n_samples = X.shape[0]
km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size,
random_state=0).fit(X)
# n_iter_ is the number of started epochs
assert km.n_iter_ == np.ceil((km.n_steps_ * batch_size) / n_samples)
assert isinstance(km.n_iter_, int)
# without stopping condition, max_iter should be reached
km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size, random_state=0,
tol=0, max_no_improvement=None, max_iter=10).fit(X)
assert km.n_iter_ == 10
assert km.n_steps_ == (10 * n_samples) // batch_size
assert isinstance(km.n_steps_, int)
def test_kmeans_copyx():
# Check that copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check that my_X is de-centered
assert_allclose(my_X, X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_score_max_iter(Estimator):
# Check that fitting KMeans or MiniBatchKMeans with more iterations gives
# better score
X = np.random.RandomState(0).randn(100, 10)
km1 = Estimator(n_init=1, random_state=42, max_iter=1)
s1 = km1.fit(X).score(X)
km2 = Estimator(n_init=1, random_state=42, max_iter=10)
s2 = km2.fit(X).score(X)
assert s2 > s1
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("init", ["random", "k-means++"])
@pytest.mark.parametrize("Estimator, algorithm", [
(KMeans, "full"),
(KMeans, "elkan"),
(MiniBatchKMeans, None)
])
def test_predict(Estimator, algorithm, init, dtype, array_constr):
# Check the predict method and the equivalence between fit.predict and
# fit_predict.
# There's a very small chance of failure with elkan on unstructured dataset
# because predict method uses fast euclidean distances computation which
# may cause small numerical instabilities.
if sys.platform == "darwin":
pytest.xfail(
"Known failures on MacOS, See "
"https://github.com/scikit-learn/scikit-learn/issues/12644")
X, _ = make_blobs(n_samples=500, n_features=10, centers=10, random_state=0)
X = array_constr(X)
# With n_init = 1
km = Estimator(n_clusters=10, init=init, n_init=1, random_state=0)
if algorithm is not None:
km.set_params(algorithm=algorithm)
km.fit(X)
labels = km.labels_
# re-predict labels for training set using predict
pred = km.predict(X)
assert_array_equal(pred, labels)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, labels)
# predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(10))
# With n_init > 1
# Due to randomness in the order in which chunks of data are processed when
# using more than one thread, there might be different rounding errors for
# the computation of the inertia between 2 runs. This might result in a
# different ranking of 2 inits, hence a different labeling, even if they
# give the same clustering. We only check the labels up to a permutation.
km = Estimator(n_clusters=10, init=init, n_init=10, random_state=0)
if algorithm is not None:
km.set_params(algorithm=algorithm)
km.fit(X)
labels = km.labels_
# re-predict labels for training set using predict
pred = km.predict(X)
assert_allclose(v_measure_score(pred, labels), 1)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_allclose(v_measure_score(pred, labels), 1)
# predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_allclose(v_measure_score(pred, np.arange(10)), 1)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_dense_sparse(Estimator):
# Check that the results are the same for dense and sparse input.
sample_weight = np.random.RandomState(0).random_sample((n_samples,))
km_dense = Estimator(n_clusters=n_clusters, random_state=0, n_init=1)
km_dense.fit(X, sample_weight=sample_weight)
km_sparse = Estimator(n_clusters=n_clusters, random_state=0, n_init=1)
km_sparse.fit(X_csr, sample_weight=sample_weight)
assert_array_equal(km_dense.labels_, km_sparse.labels_)
assert_allclose(km_dense.cluster_centers_, km_sparse.cluster_centers_)
@pytest.mark.parametrize("init", ["random", "k-means++", centers],
ids=["random", "k-means++", "ndarray"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_predict_dense_sparse(Estimator, init):
# check that models trained on sparse input also works for dense input at
# predict time and vice versa.
n_init = 10 if isinstance(init, str) else 1
km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init,
random_state=0)
km.fit(X_csr)
assert_array_equal(km.predict(X), km.labels_)
km.fit(X)
assert_array_equal(km.predict(X_csr), km.labels_)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("init", ["k-means++", "ndarray"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_integer_input(Estimator, array_constr, dtype, init):
# Check that KMeans and MiniBatchKMeans work with integer input.
X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]])
X = array_constr(X_dense, dtype=dtype)
n_init = 1 if init == "ndarray" else 10
init = X_dense[:2] if init == "ndarray" else init
km = Estimator(n_clusters=2, init=init, n_init=n_init, random_state=0)
if Estimator is MiniBatchKMeans:
km.set_params(batch_size=2)
km.fit(X)
# Internally integer input should be converted to float64
assert km.cluster_centers_.dtype == np.float64
expected_labels = [0, 1, 1, 0, 0, 1]
assert_allclose(v_measure_score(km.labels_, expected_labels), 1)
# Same with partial_fit (#14314)
if Estimator is MiniBatchKMeans:
km = clone(km).partial_fit(X)
assert km.cluster_centers_.dtype == np.float64
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_transform(Estimator):
# Check the transform method
km = Estimator(n_clusters=n_clusters).fit(X)
# Transforming cluster_centers_ should return the pairwise distances
# between centers
Xt = km.transform(km.cluster_centers_)
assert_allclose(Xt, pairwise_distances(km.cluster_centers_))
# In particular, diagonal must be 0
assert_array_equal(Xt.diagonal(), np.zeros(n_clusters))
# Transforming X should return the pairwise distances between X and the
# centers
Xt = km.transform(X)
assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fit_transform(Estimator):
# Check equivalence between fit.transform and fit_transform
X1 = Estimator(random_state=0, n_init=1).fit(X).transform(X)
X2 = Estimator(random_state=0, n_init=1).fit_transform(X)
assert_allclose(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
previous_inertia = np.inf
for n_init in [1, 5, 10]:
# set max_iter=1 to avoid finding the global minimum and get the same
# inertia each time
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=0, max_iter=1).fit(X)
assert km.inertia_ <= previous_inertia
def test_k_means_function():
# test calling the k_means function directly
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
sample_weight=None)
assert cluster_centers.shape == (n_clusters, n_features)
assert np.unique(labels).shape[0] == n_clusters
# check that the label assignment is perfect (up to a permutation)
assert_allclose(v_measure_score(true_labels, labels), 1.0)
assert inertia > 0.0
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_float_precision(Estimator, data):
# Check that the results are the same for single and double precision.
km = Estimator(n_init=1, random_state=0)
inertia = {}
Xt = {}
centers = {}
labels = {}
for dtype in [np.float64, np.float32]:
X = data.astype(dtype, **_astype_copy_false(data))
km.fit(X)
inertia[dtype] = km.inertia_
Xt[dtype] = km.transform(X)
centers[dtype] = km.cluster_centers_
labels[dtype] = km.labels_
# dtype of cluster centers has to be the dtype of the input data
assert km.cluster_centers_.dtype == dtype
# same with partial_fit
if Estimator is MiniBatchKMeans:
km.partial_fit(X[0:3])
assert km.cluster_centers_.dtype == dtype
# compare arrays with low precision since the difference between 32 and
# 64 bit comes from an accumulation of rounding errors.
assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-5)
assert_allclose(Xt[np.float32], Xt[np.float64], rtol=1e-5)
assert_allclose(centers[np.float32], centers[np.float64], rtol=1e-5)
assert_array_equal(labels[np.float32], labels[np.float64])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_centers_not_mutated(Estimator, dtype):
# Check that KMeans and MiniBatchKMeans won't mutate the user provided
# init centers silently even if input data and init centers have the same
# type.
X_new_type = X.astype(dtype, copy=False)
centers_new_type = centers.astype(dtype, copy=False)
km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1)
km.fit(X_new_type)
assert not np.may_share_memory(km.cluster_centers_, centers_new_type)
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
def test_kmeans_init_fitted_centers(data):
# Check that starting fitting from a local optimum shouldn't change the
# solution
km1 = KMeans(n_clusters=n_clusters).fit(data)
km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_,
n_init=1).fit(data)
assert_allclose(km1.cluster_centers_, km2.cluster_centers_)
def test_kmeans_warns_less_centers_than_unique_points():
# Check KMeans when the number of found clusters is smaller than expected
X = np.asarray([[0, 0],
[0, 1],
[1, 0],
[1, 0]]) # last point is duplicated
km = KMeans(n_clusters=4)
# KMeans should warn that fewer labels than cluster centers have been used
msg = (r"Number of distinct clusters \(3\) found smaller than "
r"n_clusters \(4\). Possibly due to duplicate points in X.")
with pytest.warns(ConvergenceWarning, match=msg):
km.fit(X)
# only three distinct points, so only three clusters
# can have points assigned to them
assert set(km.labels_) == set(range(3))
def _sort_centers(centers):
return np.sort(centers, axis=0)
def test_weighted_vs_repeated():
# Check that a sample weight of N should yield the same result as an N-fold
# repetition of the sample. Valid only if init is precomputed, otherwise
# rng produces different results. Not valid for MiniBatchKMeans because the
# rng is used to extract the minibatches.
sample_weight = np.random.RandomState(0).randint(1, 5, size=n_samples)
X_repeat = np.repeat(X, sample_weight, axis=0)
km = KMeans(init=centers, n_init=1, n_clusters=n_clusters, random_state=0)
km_weighted = clone(km).fit(X, sample_weight=sample_weight)
repeated_labels = np.repeat(km_weighted.labels_, sample_weight)
km_repeated = clone(km).fit(X_repeat)
assert_array_equal(km_repeated.labels_, repeated_labels)
assert_allclose(km_weighted.inertia_, km_repeated.inertia_)
assert_allclose(_sort_centers(km_weighted.cluster_centers_),
_sort_centers(km_repeated.cluster_centers_))
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_unit_weights_vs_no_weights(Estimator, data):
# Check that not passing sample weights should be equivalent to passing
# sample weights all equal to one.
sample_weight = np.ones(n_samples)
km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)
km_none = clone(km).fit(data, sample_weight=None)
km_ones = clone(km).fit(data, sample_weight=sample_weight)
assert_array_equal(km_none.labels_, km_ones.labels_)
assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_)
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_scaled_weights(Estimator, data):
# Check that scaling all sample weights by a common factor
# shouldn't change the result
sample_weight = np.random.RandomState(0).uniform(size=n_samples)
km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)
km_orig = clone(km).fit(data, sample_weight=sample_weight)
km_scaled = clone(km).fit(data, sample_weight=0.5 * sample_weight)
assert_array_equal(km_orig.labels_, km_scaled.labels_)
assert_allclose(km_orig.cluster_centers_, km_scaled.cluster_centers_)
def test_kmeans_elkan_iter_attribute():
# Regression test on bad n_iter_ value. Previously n_iter_ was off by one
# from its correct value (#11340).
km = KMeans(algorithm="elkan", max_iter=1).fit(X)
assert km.n_iter_ == 1
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
def test_kmeans_empty_cluster_relocated(array_constr):
# check that empty clusters are correctly relocated when using sample
# weights (#13486)
X = array_constr([[-1], [1]])
sample_weight = [1.9, 0.1]
init = np.array([[-1], [10]])
km = KMeans(n_clusters=2, init=init, n_init=1)
km.fit(X, sample_weight=sample_weight)
assert len(set(km.labels_)) == 2
assert_allclose(km.cluster_centers_, [[-1], [1]])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_result_equal_in_diff_n_threads(Estimator):
# Check that KMeans/MiniBatchKMeans give the same results in parallel mode
# as in sequential mode.
rnd = np.random.RandomState(0)
X = rnd.normal(size=(50, 10))
with threadpool_limits(limits=1, user_api="openmp"):
result_1 = Estimator(
n_clusters=n_clusters, random_state=0).fit(X).labels_
with threadpool_limits(limits=2, user_api="openmp"):
result_2 = Estimator(
n_clusters=n_clusters, random_state=0).fit(X).labels_
assert_array_equal(result_1, result_2)
@pytest.mark.parametrize("attr", ["counts_", "init_size_", "random_state_"])
def test_minibatch_kmeans_deprecated_attributes(attr):
# check that a deprecation warning is raised when accessing the deprecated attributes
# FIXME: remove in 1.1
depr_msg = (f"The attribute '{attr}' is deprecated in 0.24 and will be "
f"removed in 1.1")
km = MiniBatchKMeans(n_clusters=2, n_init=1, init='random', random_state=0)
km.fit(X)
with pytest.warns(FutureWarning, match=depr_msg):
getattr(km, attr)
def test_warning_elkan_1_cluster():
# Check warning messages specific to KMeans
with pytest.warns(RuntimeWarning,
match="algorithm='elkan' doesn't make sense for a single"
" cluster"):
KMeans(n_clusters=1, algorithm="elkan").fit(X)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("algo", ["full", "elkan"])
def test_k_means_1_iteration(array_constr, algo):
# check the results after a single iteration (E-step M-step E-step) by
# comparing against a pure python implementation.
X = np.random.RandomState(0).uniform(size=(100, 5))
init_centers = X[:5]
X = array_constr(X)
def py_kmeans(X, init):
new_centers = init.copy()
labels = pairwise_distances_argmin(X, init)
for label in range(init.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
labels = pairwise_distances_argmin(X, new_centers)
return labels, new_centers
py_labels, py_centers = py_kmeans(X, init_centers)
cy_kmeans = KMeans(n_clusters=5, n_init=1, init=init_centers,
algorithm=algo, max_iter=1).fit(X)
cy_labels = cy_kmeans.labels_
cy_centers = cy_kmeans.cluster_centers_
assert_array_equal(py_labels, cy_labels)
assert_allclose(py_centers, cy_centers)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("squared", [True, False])
def test_euclidean_distance(dtype, squared):
# Check that the _euclidean_(dense/sparse)_dense helpers produce correct
# results
rng = np.random.RandomState(0)
a_sparse = sp.random(1, 100, density=0.5, format="csr", random_state=rng,
dtype=dtype)
a_dense = a_sparse.toarray().reshape(-1)
b = rng.randn(100).astype(dtype, copy=False)
b_squared_norm = (b**2).sum()
expected = ((a_dense - b)**2).sum()
expected = expected if squared else np.sqrt(expected)
distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared)
distance_sparse_dense = _euclidean_sparse_dense_wrapper(
a_sparse.data, a_sparse.indices, b, b_squared_norm, squared)
assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6)
assert_allclose(distance_dense_dense, expected, rtol=1e-6)
assert_allclose(distance_sparse_dense, expected, rtol=1e-6)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_inertia(dtype):
# Check that the _inertia_(dense/sparse) helpers produce correct results.
rng = np.random.RandomState(0)
X_sparse = sp.random(100, 10, density=0.5, format="csr", random_state=rng,
dtype=dtype)
X_dense = X_sparse.toarray()
sample_weight = rng.randn(100).astype(dtype, copy=False)
centers = rng.randn(5, 10).astype(dtype, copy=False)
labels = rng.randint(5, size=100, dtype=np.int32)
distances = ((X_dense - centers[labels])**2).sum(axis=1)
expected = np.sum(distances * sample_weight)
inertia_dense = _inertia_dense(
X_dense, sample_weight, centers, labels, n_threads=1)
inertia_sparse = _inertia_sparse(
X_sparse, sample_weight, centers, labels, n_threads=1)
assert_allclose(inertia_dense, inertia_sparse, rtol=1e-6)
assert_allclose(inertia_dense, expected, rtol=1e-6)
assert_allclose(inertia_sparse, expected, rtol=1e-6)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_sample_weight_unchanged(Estimator):
# Check that sample_weight is not modified in place by KMeans (#17204)
X = np.array([[1], [2], [4]])
sample_weight = np.array([0.5, 0.2, 0.3])
Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight)
assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3]))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
@pytest.mark.parametrize("param, match", [
({"n_init": 0}, r"n_init should be > 0"),
({"max_iter": 0}, r"max_iter should be > 0"),
({"n_clusters": n_samples + 1}, r"n_samples.* should be >= n_clusters"),
({"init": X[:2]},
r"The shape of the initial centers .* does not match "
r"the number of clusters"),
({"init": lambda X_, k, random_state: X_[:2]},
r"The shape of the initial centers .* does not match "
r"the number of clusters"),
({"init": X[:8, :2]},
r"The shape of the initial centers .* does not match "
r"the number of features of the data"),
({"init": lambda X_, k, random_state: X_[:8, :2]},
r"The shape of the initial centers .* does not match "
r"the number of features of the data"),
({"init": "wrong"},
r"init should be either 'k-means\+\+', 'random', "
r"a ndarray or a callable")]
)
def test_wrong_params(Estimator, param, match):
# Check that errors are raised with a clear message when wrong values
# are passed for the parameters
# Set n_init=1 by default to avoid warning with precomputed init
km = Estimator(n_init=1)
with pytest.raises(ValueError, match=match):
km.set_params(**param).fit(X)
@pytest.mark.parametrize("param, match", [
({"algorithm": "wrong"}, r"Algorithm must be 'auto', 'full' or 'elkan'")]
)
def test_kmeans_wrong_params(param, match):
# Check that errors are raised with a clear message when wrong values
# are passed for the KMeans specific parameters
with pytest.raises(ValueError, match=match):
KMeans(**param).fit(X)
@pytest.mark.parametrize("param, match", [
({"max_no_improvement": -1}, r"max_no_improvement should be >= 0"),
({"batch_size": -1}, r"batch_size should be > 0"),
({"init_size": -1}, r"init_size should be > 0"),
({"reassignment_ratio": -1}, r"reassignment_ratio should be >= 0")]
)
def test_minibatch_kmeans_wrong_params(param, match):
# Check that errors are raised with a clear message when wrong values
# are passed for the MiniBatchKMeans specific parameters
with pytest.raises(ValueError, match=match):
MiniBatchKMeans(**param).fit(X)
@pytest.mark.parametrize("param, match", [
({"n_local_trials": 0},
r"n_local_trials is set to 0 but should be an "
r"integer value greater than zero"),
({"x_squared_norms": X[:2]},
r"The length of x_squared_norms .* should "
r"be equal to the length of n_samples")]
)
def test_kmeans_plusplus_wrong_params(param, match):
with pytest.raises(ValueError, match=match):
kmeans_plusplus(X, n_clusters, **param)
@pytest.mark.parametrize("data", [X, X_csr])
@pytest.mark.parametrize("dtype", [np.float64, np.float32])
def test_kmeans_plusplus_output(data, dtype):
# Check for the correct number of seeds and all positive values
data = data.astype(dtype)
centers, indices = kmeans_plusplus(data, n_clusters)
# Check there are the correct number of indices and that all indices are
# positive and within the number of samples
assert indices.shape[0] == n_clusters
assert (indices >= 0).all()
assert (indices <= data.shape[0]).all()
# Check for the correct number of seeds and that they are bound by the data
assert centers.shape[0] == n_clusters
assert (centers.max(axis=0) <= data.max(axis=0)).all()
assert (centers.min(axis=0) >= data.min(axis=0)).all()
# Check that indices correspond to reported centers
# Use X for comparison rather than data, test still works against centers
# calculated with sparse data.
assert_allclose(X[indices].astype(dtype), centers)
@pytest.mark.parametrize("x_squared_norms", [row_norms(X, squared=True), None])
def test_kmeans_plusplus_norms(x_squared_norms):
# Check that defining x_squared_norms returns the same as default=None.
centers, indices = kmeans_plusplus(X, n_clusters,
x_squared_norms=x_squared_norms)
assert_allclose(X[indices], centers)
def test_kmeans_plusplus_dataorder():
# Check that memory layout does not affect the result
centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=0)
X_fortran = np.asfortranarray(X)
centers_fortran, _ = kmeans_plusplus(X_fortran, n_clusters, random_state=0)
assert_allclose(centers_c, centers_fortran)
| bsd-3-clause |
carrillo/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__all__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
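# Illustrative note (not from the original source): for 1d inputs such as
# y_true=[3, -0.5] and y_pred=[2.5, 0.0], the arrays are reshaped to column
# vectors of shape (2, 1), the returned y_type is 'continuous' and the
# multioutput keyword is passed through unchanged.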
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
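# Usage note (a sketch, not part of this module's API): a root mean squared
# error is not defined here as a separate function, but can be obtained by
# taking the square root of the returned loss, e.g.
#
#   rmse = np.sqrt(mean_squared_error(y_true, y_pred))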
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
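# Illustrative contrast with ``r2_score`` (a sketch with made-up numbers):
# because the squared residuals above are centered by ``y_diff_avg``, a purely
# systematic offset in the predictions is not penalized here, while it is in
# ``r2_score``:
#
#   y_true, y_pred = [1, 2, 3], [2, 3, 4]        # constant offset of +1
#   explained_variance_score(y_true, y_pred)     # 1.0
#   r2_score(y_true, y_pred)                     # -0.5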
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get an R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted', but
will be changed to 'uniform_average' in a future version.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrarily set to zero to avoid -inf scores; a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
timsnyder/bokeh | bokeh/core/json_encoder.py | 2 | 9041 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide functions and classes to implement a custom JSON encoder for
serializing objects for BokehJS.
The primary interface is provided by the |serialize_json| function, which
uses the custom |BokehJSONEncoder| to produce JSON output.
In general, functions in this module convert values in the following way:
* Datetime values (Python, Pandas, NumPy) are converted to floating point
milliseconds since epoch.
* TimeDelta values are converted to absolute floating point milliseconds.
* RelativeDelta values are converted to dictionaries.
* Decimal values are converted to floating point.
* Sequences (Pandas Series, NumPy arrays, python sequences) that are passed
through this interface are converted to lists. Note, however, that arrays in
data sources inside Bokeh Documents are converted elsewhere, and by default
use a binary encoded format.
* Bokeh ``Model`` instances are usually serialized elsewhere in the context
of an entire Bokeh Document. Models passed through this interface are
converted to references.
* ``HasProps`` (that are not Bokeh models) are converted to key/value dicts of
all their properties and values.
* ``Color`` instances are converted to CSS color values.
.. |serialize_json| replace:: :class:`~bokeh.core.json_encoder.serialize_json`
.. |BokehJSONEncoder| replace:: :class:`~bokeh.core.json_encoder.BokehJSONEncoder`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import collections
import decimal
import json
# External imports
import numpy as np
# Bokeh imports
from ..settings import settings
from ..util.dependencies import import_optional
from ..util.serialization import convert_datetime_type, convert_timedelta_type, is_datetime_type, is_timedelta_type, transform_series, transform_array
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
rd = import_optional("dateutil.relativedelta")
pd = import_optional('pandas')
__all__ = (
'BokehJSONEncoder',
'serialize_json',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def serialize_json(obj, pretty=None, indent=None, **kwargs):
''' Return a serialized JSON representation of objects, suitable to
send to BokehJS.
This function is typically used to serialize single python objects in
the manner expected by BokehJS. In particular, many datetime values are
automatically normalized to an expected format. Some Bokeh objects can
also be passed, but note that Bokeh models are typically properly
serialized in the context of an entire Bokeh document.
The resulting JSON always has sorted keys. By default, the output is
as compact as possible unless pretty output or indentation is requested.
Args:
obj (obj) : the object to serialize to JSON format
pretty (bool, optional) :
Whether to generate prettified output. If ``True``, spaces are
added after separators, and indentation and newlines
are applied. (default: False)
Pretty output can also be enabled with the environment variable
``BOKEH_PRETTY``, which overrides this argument, if set.
indent (int or None, optional) :
Amount of indentation to use in generated JSON output. If ``None``
then no indentation is used, unless pretty output is enabled,
in which case two spaces are used. (default: None)
Any additional keyword arguments are passed to ``json.dumps``, except for
some that are computed internally, and cannot be overridden:
* allow_nan
* indent
* separators
* sort_keys
Examples:
.. code-block:: python
>>> data = dict(b=np.datetime64('2017-01-01'), a = np.arange(3))
>>> print(serialize_json(data))
{"a":[0,1,2],"b":1483228800000.0}
>>> print(serialize_json(data, pretty=True))
{
"a": [
0,
1,
2
],
"b": 1483228800000.0
}
'''
# these args to json.dumps are computed internally and should not be passed along
for name in ['allow_nan', 'separators', 'sort_keys']:
if name in kwargs:
raise ValueError("The value of %r is computed internally, overriding is not permissable." % name)
if pretty is None:
pretty = settings.pretty(False)
if pretty:
separators=(",", ": ")
else:
separators=(",", ":")
if pretty and indent is None:
indent = 2
return json.dumps(obj, cls=BokehJSONEncoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehJSONEncoder(json.JSONEncoder):
''' A custom ``json.JSONEncoder`` subclass for encoding objects in
accordance with the BokehJS protocol.
'''
def transform_python_types(self, obj):
''' Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
# date/time values that get serialized as milliseconds
if is_datetime_type(obj):
return convert_datetime_type(obj)
if is_timedelta_type(obj):
return convert_timedelta_type(obj)
# slice objects
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.stop, step=obj.step)
# NumPy scalars
elif np.issubdtype(type(obj), np.floating):
return float(obj)
elif np.issubdtype(type(obj), np.integer):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Decimal values
elif isinstance(obj, decimal.Decimal):
return float(obj)
# RelativeDelta gets serialized as a dict
elif rd and isinstance(obj, rd.relativedelta):
return dict(years=obj.years,
months=obj.months,
days=obj.days,
hours=obj.hours,
minutes=obj.minutes,
seconds=obj.seconds,
microseconds=obj.microseconds)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
''' The required ``default`` method for ``JSONEncoder`` subclasses.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder.
'''
from ..model import Model
from ..colors import Color
from .has_props import HasProps
# array types -- use force_list here, only binary
# encoding CDS columns for now
if pd and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj, force_list=True)
elif isinstance(obj, np.ndarray):
return transform_array(obj, force_list=True)
elif isinstance(obj, collections.deque):
return list(map(self.default, obj))
elif isinstance(obj, Model):
return obj.ref
elif isinstance(obj, HasProps):
return obj.properties_with_values(include_defaults=False)
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
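# A minimal usage sketch (this mirrors what ``serialize_json`` does internally;
# the ``payload`` variable is illustrative only):
#
#   import json
#   import numpy as np
#   payload = {"a": np.arange(3), "b": np.float32(1.5)}
#   json.dumps(payload, cls=BokehJSONEncoder, allow_nan=False, sort_keys=True)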
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
abhishekgahlot/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 23 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
"""Test the interpolating property for different kernels."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_true(np.allclose(y_pred, y))
assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
""" Test that hyperparameter-optimization remains in bounds"""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
""" Test that GP prior has mean 0 and identical variances."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
""" Test that statistics of samples drawn from GP are correct."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
""" Test that kernel parameters are unmodified when optimizer is None."""
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
""" Test that predicted std.-dev. is consistent with cov's diagonal."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
""" Test that GPR can identify meaningful anisotropic length-scales. """
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
""" Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results
"""
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
""" Test that GPR can deal with multi-dimensional target values"""
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
""" Test that GPR can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
""" Test GPR can handle two different output-values for the same input. """
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
| bsd-3-clause |
alexanderfield/spindle | benchmark-scripts/partitions.py | 3 | 5422 | #!/usr/bin/env python3
###########################################################################
##
## Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################
import argparse
from urllib.request import urlopen
import yaml
import statistics
import os
import sys
import time
import re
from subprocess import Popen, PIPE
import traceback
import bench_base
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
scHost = "localhost"
scPort = 8605
def runPartitionExperiment(name, data_dir):
print("Running partition experiment for '" + name + "'.")
targetPartitionSizes = [10000, 50000, 100000, 200000,
300000, 400000, 500000, 750000, 1000000, 1500000]
memoryPerWorker = "21g"
cores = 143
timesToRun = 4
cache = True
outFilePath = data_dir + "/partitions/" + name + ".yaml"
if os.path.isfile(outFilePath):
with open(outFilePath, "r") as f: data = yaml.load(f)
else: data = {}
for tps in targetPartitionSizes:
if tps not in data: data[tps] = []
if len(data[tps]) >= timesToRun:
print(" Already profiled for " + str(tps) +
" targetPartitionSizes, skipping.")
continue
while len(data[tps]) < timesToRun:
try:
bench_base.restartServers()
bench_base.restartSparkContext(memoryPerWorker, cores)
# Load the data into cache.
if cache: bench_base.runQuery(name,"2014-01-01","2014-01-07",cache,tps)
while len(data[tps]) < timesToRun:
result = bench_base.runQuery(name,"2014-01-01","2014-01-07",cache,tps)
data[tps].append(result[2]['TimeMillis'] - result[0]['TimeMillis'])
with open(outFilePath, "w") as f:
f.write(yaml.dump(data, indent=2, default_flow_style=False))
except KeyboardInterrupt: sys.exit(-1)
except Exception:
print("Exception occurred, retrying.")
traceback.print_exc()
if not cache: data[tps] = []
pass
return data
def getStats(data):
x = []; y = []; err = []
sortedKeys = sorted(data)
minX = sortedKeys[0]; maxX = sortedKeys[-1]
minY = statistics.mean(data[minX]); maxY = minY
for tps in sortedKeys:
x.append(tps)
m_data = statistics.mean(data[tps])
if m_data < minY: minY = m_data
if m_data > maxY: maxY = m_data
y.append(m_data)
err.append(statistics.stdev(data[tps]))
return (x,y,err,minX,maxX,minY,maxY)
def plotSpeedups(query, data, data_dir):
fig = plt.figure()
ax = plt.subplot(111)
plt.title(query)
plt.xlabel("Target Partition Sizes")
plt.ylabel("Execution Time (ms)")
(x, y, err, minX, maxX, minY, maxY) = getStats(data)
plt.errorbar(x, y, yerr=err, marker='.', color='black',ecolor="gray")
plt.axis([0, 1.02*maxX, 0, 1.02*(maxY+max(err))])
leg = ax.legend(["Caching. 6 workers."], fancybox=True)
leg.get_frame().set_alpha(0.5)
# plt.grid()
# Add annotations for the endpoint values.
# ax.annotate("("+str(x[0])+","+str(y[0])+")", xy=(x[0],y[0]), xytext=(0,0),
# textcoords='offset points', color='black')
# ax.annotate(str(y[1]), xy=(x[1],y[1]), xytext=(0,0),
# textcoords='offset points', color='black')
ax.annotate(str(y[-1]), xy=(x[-1],y[-1]), xytext=(10,0),
textcoords='offset points', color='black')
plt.savefig(data_dir + "/partitions/pdf/" + query + ".pdf")
plt.savefig(data_dir + "/partitions/png/" + query + ".png")
plt.clf()
# Print stats.
def two(s): return "{:.2f}".format(s)
print(" & ".join([query, two(min(y)), two(y[-1])]) + r" \\")
parser = argparse.ArgumentParser()
parser.add_argument("--collect-data", dest="collect", action="store_true")
parser.add_argument("--create-plots", dest="plot", action="store_true")
parser.add_argument("--data-dir", dest="data_dir", type=str, default=".")
args = parser.parse_args()
queriesWithPartitioning = [
"TopPages",
"TopPagesByBrowser",
"TopPagesByPreviousTopPages",
"TopReferringDomains",
"RevenueFromTopReferringDomains",
"RevenueFromTopReferringDomainsFirstVisitGoogle",
]
if args.collect:
if not os.path.isdir(args.data_dir + "/partitions"):
os.makedirs(args.data_dir + "/partitions")
for query in queriesWithPartitioning:
runPartitionExperiment(query, args.data_dir)
if args.plot:
print(" & ".join(["Query","Best Execution Time","Final Execution Time"]) +
r" \\ \hline")
if not os.path.isdir(args.data_dir + "/partitions/pdf"):
os.makedirs(args.data_dir + "/partitions/pdf")
if not os.path.isdir(args.data_dir + "/partitions/png"):
os.makedirs(args.data_dir + "/partitions/png")
for query in queriesWithPartitioning:
with open(args.data_dir+"/partitions/"+query+".yaml", "r") as f:
data = yaml.load(f)
plotSpeedups(query, data, args.data_dir)
| apache-2.0 |
jpautom/scikit-learn | examples/model_selection/plot_roc.py | 49 | 5041 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
Dapid/GPy | GPy/models/gplvm.py | 8 | 3049 | # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .. import kern
from ..core import GP, Param
from ..likelihoods import Gaussian
from .. import util
class GPLVM(GP):
"""
Gaussian Process Latent Variable Model
"""
def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, name="gplvm"):
"""
:param Y: observed data
:type Y: np.ndarray
:param input_dim: latent dimensionality
:type input_dim: int
:param init: initialisation method for the latent space
:type init: 'PCA'|'random'
"""
if X is None:
from ..util.initialization import initialize_latent
X, fracs = initialize_latent(init, input_dim, Y)
else:
fracs = np.ones(input_dim)
if kernel is None:
kernel = kern.RBF(input_dim, lengthscale=fracs, ARD=input_dim > 1) + kern.Bias(input_dim, np.exp(-2))
likelihood = Gaussian()
super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')
self.X = Param('latent_mean', X)
self.link_parameter(self.X, index=0)
def parameters_changed(self):
super(GPLVM, self).parameters_changed()
self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
#def jacobian(self,X):
# J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
# for i in range(self.output_dim):
# J[:,:,i] = self.kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1], X, self.X)
# return J
#def magnification(self,X):
# target=np.zeros(X.shape[0])
# #J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
## J = self.jacobian(X)
# for i in range(X.shape[0]):
# target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:]))))
# return target
def plot(self):
assert self.Y.shape[1] == 2, "too high dimensional to plot. Try plot_latent"
from matplotlib import pyplot as plt
plt.scatter(self.Y[:, 0],
self.Y[:, 1],
40, self.X[:, 0].copy(),
linewidth=0, cmap=plt.cm.jet)
Xnew = np.linspace(self.X.min(), self.X.max(), 200)[:, None]
mu, _ = self.predict(Xnew)
plt.plot(mu[:, 0], mu[:, 1], 'k', linewidth=1.5)
def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40,
fignum=None, legend=True,
plot_limits=None,
aspect='auto', updates=False, **kwargs):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import dim_reduction_plots
return dim_reduction_plots.plot_latent(self, labels, which_indices,
resolution, ax, marker, s,
fignum, False, legend,
plot_limits, aspect, updates, **kwargs)
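# A minimal usage sketch (hypothetical data; relies on the standard ``optimize``
# method inherited from the GPy model base class):
#
#   import numpy as np
#   import GPy
#   Y = np.random.randn(40, 5)               # 40 observations, 5 outputs
#   m = GPy.models.GPLVM(Y, input_dim=2)     # 2-d latent space, PCA init
#   m.optimize(messages=False)
#   latent_means = np.asarray(m.X)           # optimized latent positions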
| bsd-3-clause |
grlee77/pywt | demo/dwt_swt_show_coeffs.py | 6 | 2469 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pywt
import pywt.data
ecg = pywt.data.ecg()
data1 = np.concatenate((np.arange(1, 400),
np.arange(398, 600),
np.arange(601, 1024)))
x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x)))
mode = pywt.Modes.sp1DWT = 1
def plot_coeffs(data, w, title, use_dwt=True):
"""Show dwt or swt coefficients for given data and wavelet."""
w = pywt.Wavelet(w)
a = data
ca = []
cd = []
if use_dwt:
for i in range(5):
(a, d) = pywt.dwt(a, w, mode)
ca.append(a)
cd.append(d)
else:
coeffs = pywt.swt(data, w, 5) # [(cA5, cD5), ..., (cA1, cD1)]
for a, d in reversed(coeffs):
ca.append(a)
cd.append(d)
fig = plt.figure()
ax_main = fig.add_subplot(len(ca) + 1, 1, 1)
ax_main.set_title(title)
ax_main.plot(data)
ax_main.set_xlim(0, len(data) - 1)
for i, x in enumerate(ca):
ax = fig.add_subplot(len(ca) + 1, 2, 3 + i * 2)
ax.plot(x, 'r')
ax.set_ylabel("A%d" % (i + 1))
if use_dwt:
ax.set_xlim(0, len(x) - 1)
else:
ax.set_xlim(w.dec_len * i, len(x) - 1 - w.dec_len * i)
for i, x in enumerate(cd):
ax = fig.add_subplot(len(cd) + 1, 2, 4 + i * 2)
ax.plot(x, 'g')
ax.set_ylabel("D%d" % (i + 1))
# Scale axes
ax.set_xlim(0, len(x) - 1)
if use_dwt:
ax.set_ylim(min(0, 1.4 * min(x)), max(0, 1.4 * max(x)))
else:
vals = x[w.dec_len * (1 + i):len(x) - w.dec_len * (1 + i)]
ax.set_ylim(min(0, 2 * min(vals)), max(0, 2 * max(vals)))
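# Note (a sketch, not used below): the manual DWT loop in ``plot_coeffs`` is
# roughly what ``pywt.wavedec`` computes in a single call, except that
# ``wavedec`` keeps only the final approximation and uses its own default
# signal-extension mode:
#
#   coeffs = pywt.wavedec(data1, 'db1', level=5)
#   # coeffs == [cA5, cD5, cD4, cD3, cD2, cD1]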
# Show DWT coefficients
use_dwt = True
plot_coeffs(data1, 'db1',
"DWT: Signal irregularity shown in D1 - Haar wavelet",
use_dwt)
plot_coeffs(data2, 'sym5', "DWT: Frequency and phase change - Symmlets5",
use_dwt)
plot_coeffs(ecg, 'sym5', "DWT: Ecg sample - Symmlets5", use_dwt)
# Show DWT coefficients
use_dwt = False
plot_coeffs(data1, 'db1', "SWT: Signal irregularity detection - Haar wavelet",
use_dwt)
plot_coeffs(data2, 'sym5', "SWT: Frequency and phase change - Symmlets5",
use_dwt)
plot_coeffs(ecg, 'sym5', "SWT: Ecg sample - simple QRS detection - Symmlets5",
use_dwt)
plt.show()
| mit |
RomainBrault/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 42 | 14294 | import numpy as np
from sklearn.utils.testing import (assert_equal, assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
# This test that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check the PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
# We test scale=True parameter
# This allows to check numerical stability over platforms as well
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
# Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
# This test robustness of algorithm when dealing with value close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
untom/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
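# Quick illustration (added for exposition, not part of the original example):
# tokens that start with a digit or underscore collapse to the '#NUMBER'
# placeholder.
expected = ['win', '#NUMBER', 'games', 'in', '#NUMBER']
assert number_aware_tokenizer("win 3000 games in 1993") == expected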
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
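# (comment added for clarity) normalized cut = weight of entries that cross
# the bicluster boundary divided by the weight of entries inside it;
# smaller values indicate better-separated biclusters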
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
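# Tiny check (added for exposition, not in the original example): behaves
# like collections.Counter.most_common for a plain dict as well.
assert most_common({'b': 5, 'a': 2}) == [('b', 5), ('a', 2)]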
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
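# (comment added for clarity) rank words by how much larger their total
# tf-idf weight is inside the bicluster's documents than outside them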
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
kashif/scikit-learn | sklearn/metrics/classification.py | 7 | 69318 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
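# For intuition (comment added for exposition, not part of the original
# source): with sample_score=[1, 0, 1] and no weights, normalize=True gives
# the mean 2/3, while normalize=False gives the plain sum 2.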
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly classified
samples (float), else return the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
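Examples
--------
A minimal illustration (added here for exposition): two annotators who
agree on every sample obtain a kappa of exactly 1.
>>> from sklearn.metrics import cohen_kappa_score
>>> cohen_kappa_score([0, 1, 1, 0], [0, 1, 1, 0])
1.0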
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
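# (comment added for clarity) the MCC is computed as the Pearson correlation
# between the two encoded label vectors; for binary labels this is
# algebraically equivalent to the usual confusion-matrix formula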
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None, sample_weight=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
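# (comment added for clarity) for multilabel input, the loss is the number of
# differing label assignments divided by the total number of assignments,
# i.e. n_samples * n_labels, rescaled by the mean sample weight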
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(classes) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
y_pred = check_array(y_pred, ensure_2d=False)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
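# Illustrative sketch: the per-sample formula quoted in the docstring,
# -(yt*log(yp) + (1 - yt)*log(1 - yp)), can be reproduced with plain numpy and
# compared against this function (the probabilities below are made up):
#
#   >>> import numpy as np
#   >>> y_true = np.array([1, 0, 1])
#   >>> y_prob = np.array([0.9, 0.2, 0.6])          # estimated P(y = 1)
#   >>> by_hand = -np.mean(y_true * np.log(y_prob)
#   ...                    + (1 - y_true) * np.log(1 - y_prob))
#   >>> np.isclose(by_hand, log_loss(y_true, y_prob))
#   True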
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
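# Illustrative sketch: in the binary case described in the docstring the
# per-sample loss is max(0, 1 - y * decision) with y in {-1, +1}; the
# multiclass (Crammer-Singer) margin instead uses
# decision[true class] - max(decision[other classes]). Toy numbers below are
# made up:
#
#   >>> import numpy as np
#   >>> y = np.array([-1, 1, 1])
#   >>> decision = np.array([-2.2, 2.4, 0.1])
#   >>> round(np.mean(np.maximum(0, 1 - y * decision)), 2)  # == hinge_loss(y, decision)
#   0.3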
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
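# Illustrative sketch: the Brier score is simply the (weighted) mean squared
# difference between the predicted probability of the positive class and the
# 0/1 outcome, so the docstring example can be reproduced by hand:
#
#   >>> import numpy as np
#   >>> y_true = np.array([0, 1, 1, 0])
#   >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
#   >>> np.mean((y_true - y_prob) ** 2)   # matches brier_score_loss(y_true, y_prob)
#   0.0375...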
| bsd-3-clause |
nanophotonics/nplab | nplab/experiment/fiber_raman/test.py | 1 | 2056 | from __future__ import print_function
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
from nplab import datafile as df
from nplab.analysis.smoothing import convex_smooth
from nplab.instrument.spectrometer.acton_2300i import Acton
from nplab.instrument.camera.Picam.pixis import Pixis
from .Pacton import Pacton
def make_measurement(data_group,laser_wavelength,center_wavelength):
spectrum,_ = pacton.get_spectrum(center_wavelength,subtract_background=True,roi=[0,1024,600,800],debug=1)
data_group.create_dataset("spectrum"+"_%d",data=spectrum, attrs = {"center_wavelength":center_wavelength,"laser_wavelength":laser_wavelength})
data_group.file.flush()
return
def initialize_measurement():
f = df.DataFile("ir_calibration.hdf5","a")
g = f.require_group("calibration")
print("Starting..")
print("Pixis...")
p = Pixis(debug=1)
p.StartUp()
print("Acton...")
act = Acton("COM7",debug=1)
print("Done...")
pacton = Pacton(pixis=p,acton=act)
print("Measuring...")
fig,ax = plt.subplots(1)
p.SetExposureTime(500)
pacton.get_pixel_response_calibration_spectrum()
return pacton, g
def plot_measurement(pacton, center_wavelength):
fig,ax = plt.subplots(1)
spectrum,_ = pacton.get_spectrum(center_wavelength,subtract_background=True,roi=[0,1024,600,800],debug=1)
ax.plot(spectrum)
plt.show()
if __name__ == "__main__":
f = df.DataFile("ir_calibration.hdf5","a")
g = f.require_group("calibration")
print("Starting..")
print("Pixis...")
p = Pixis(debug=1)
p.StartUp()
print("Acton...")
act = Acton("COM7",debug=1)
print("Done...")
pacton = Pacton(pixis=p,acton=act)
print("Measuring...")
fig,ax = plt.subplots(1)
p.SetExposureTime(500)
pacton.get_pixel_response_calibration_spectrum()
center_wavelength = 895
laser_wavelength = 905.0
# make_measurement(g,laser_wavelength,center_wavelength)
# spectrum,_ = pacton.get_spectrum(center_wavelength,subtract_background=True,roi=[0,1024,600,800],debug=1)
# ax.plot(spectrum)
# plt.show()
plot_measurement(pacton, center_wavelength)
p.ShutDown()
| gpl-3.0 |
icdishb/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
almarklein/scikit-image | doc/examples/plot_marching_cubes.py | 32 | 2051 | """
==============
Marching Cubes
==============
Marching cubes is an algorithm to extract a 2D surface mesh from a 3D volume.
This can be conceptualized as a 3D generalization of isolines on topographical
or weather maps. It works by iterating across the volume, looking for regions
which cross the level of interest. If such regions are found, triangulations
are generated and added to an output mesh. The final result is a set of
vertices and a set of triangular faces.
The algorithm requires a data volume and an isosurface value. For example, in
CT imaging Hounsfield units of +700 to +3000 represent bone. So, one potential
input would be a reconstructed CT set of data and the value +700, to extract
a mesh for regions of bone or bone-like density.
This implementation also works correctly on anisotropic datasets, where the
voxel spacing is not equal for every spatial dimension, through use of the
`spacing` kwarg.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from skimage import measure
from skimage.draw import ellipsoid
# Generate a level set about zero of two identical ellipsoids in 3D
ellip_base = ellipsoid(6, 10, 16, levelset=True)
ellip_double = np.concatenate((ellip_base[:-1, ...],
ellip_base[2:, ...]), axis=0)
# Use marching cubes to obtain the surface mesh of these ellipsoids
verts, faces = measure.marching_cubes(ellip_double, 0)
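# The module docstring mentions anisotropic volumes; a hypothetical variant of
# the call above with non-uniform voxel spacing would pass the `spacing` kwarg,
# e.g. (spacing values made up for illustration):
# verts_aniso, faces_aniso = measure.marching_cubes(ellip_double, 0,
#                                                   spacing=(1.0, 1.0, 2.0))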
# Display resulting triangular mesh using Matplotlib. This can also be done
# with mayavi (see skimage.measure.marching_cubes docstring).
fig = plt.figure(figsize=(10, 12))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces])
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis: a = 6 per ellipsoid")
ax.set_ylabel("y-axis: b = 10")
ax.set_zlabel("z-axis: c = 16")
ax.set_xlim(0, 24) # a = 6 (times two for 2nd ellipsoid)
ax.set_ylim(0, 20) # b = 10
ax.set_zlim(0, 32) # c = 16
plt.show()
| bsd-3-clause |
ivano666/tensorflow | tensorflow/examples/skflow/iris_val_based_early_stopping.py | 3 | 2213 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import learn
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200)
# classifier with early stopping on training data
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier1.fit(X_train, y_train, logdir='/tmp/iris_model/')
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping on validation data
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor, logdir='/tmp/iris_model_val/')
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
| apache-2.0 |
Funtimezzhou/TradeBuildTools | Document/szse/Quantitative Trading/sat-ebook-and-full-source-20150618/algo-ebook-full-source-code-20150618/chapter11/forecast.py | 2 | 4129 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# forecast.py
from __future__ import print_function
import datetime
import numpy as np
import pandas as pd
import sklearn
from pandas.io.data import DataReader
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.lda import LDA
from sklearn.metrics import confusion_matrix
from sklearn.qda import QDA
from sklearn.svm import LinearSVC, SVC
def create_lagged_series(symbol, start_date, end_date, lags=5):
"""
This creates a pandas DataFrame that stores the
percentage returns of the adjusted closing value of
a stock obtained from Yahoo Finance, along with a
number of lagged returns from the prior trading days
(lags defaults to 5 days). Trading volume, as well as
the Direction from the previous day, are also included.
"""
# Obtain stock information from Yahoo Finance
ts = DataReader(
symbol, "yahoo",
start_date-datetime.timedelta(days=365),
end_date
)
# Create the new lagged DataFrame
tslag = pd.DataFrame(index=ts.index)
tslag["Today"] = ts["Adj Close"]
tslag["Volume"] = ts["Volume"]
# Create the shifted lag series of prior trading period close values
for i in range(0, lags):
tslag["Lag%s" % str(i+1)] = ts["Adj Close"].shift(i+1)
# Create the returns DataFrame
tsret = pd.DataFrame(index=tslag.index)
tsret["Volume"] = tslag["Volume"]
tsret["Today"] = tslag["Today"].pct_change()*100.0
# If any of the values of percentage returns equal zero, set them to
# a small number (stops issues with QDA model in scikit-learn)
for i,x in enumerate(tsret["Today"]):
if (abs(x) < 0.0001):
tsret["Today"][i] = 0.0001
# Create the lagged percentage returns columns
for i in range(0, lags):
tsret["Lag%s" % str(i+1)] = \
tslag["Lag%s" % str(i+1)].pct_change()*100.0
# Create the "Direction" column (+1 or -1) indicating an up/down day
tsret["Direction"] = np.sign(tsret["Today"])
tsret = tsret[tsret.index >= start_date]
return tsret
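# Illustrative usage sketch (symbol and dates are arbitrary; requires a working
# Yahoo Finance connection through pandas.io.data): the resulting frame has the
# columns ["Volume", "Today", "Lag1", ..., "Lag5", "Direction"], e.g.
#
#   snp = create_lagged_series("^GSPC", datetime.datetime(2004, 1, 1),
#                              datetime.datetime(2004, 12, 31), lags=5)
#   print(snp[["Lag1", "Lag2", "Direction"]].head())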
if __name__ == "__main__":
# Create a lagged series of the S&P500 US stock market index
snpret = create_lagged_series(
"^GSPC", datetime.datetime(2001,1,10),
datetime.datetime(2005,12,31), lags=5
)
# Use the prior two days of returns as predictor
# values, with direction as the response
X = snpret[["Lag1","Lag2"]]
y = snpret["Direction"]
# The test data is split into two parts: Before and after 1st Jan 2005.
start_test = datetime.datetime(2005,1,1)
# Create training and test sets
X_train = X[X.index < start_test]
X_test = X[X.index >= start_test]
y_train = y[y.index < start_test]
y_test = y[y.index >= start_test]
# Create the (parametrised) models
print("Hit Rates/Confusion Matrices:\n")
models = [("LR", LogisticRegression()),
("LDA", LDA()),
("QDA", QDA()),
("LSVC", LinearSVC()),
("RSVM", SVC(
C=1000000.0, cache_size=200, class_weight=None,
coef0=0.0, degree=3, gamma=0.0001, kernel='rbf',
max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
),
("RF", RandomForestClassifier(
n_estimators=1000, criterion='gini',
max_depth=None, min_samples_split=2,
min_samples_leaf=1, max_features='auto',
bootstrap=True, oob_score=False, n_jobs=1,
random_state=None, verbose=0)
)]
# Iterate through the models
for m in models:
# Train each of the models on the training set
m[1].fit(X_train, y_train)
# Make an array of predictions on the test set
pred = m[1].predict(X_test)
# Output the hit-rate and the confusion matrix for each model
print("%s:\n%0.3f" % (m[0], m[1].score(X_test, y_test)))
print("%s\n" % confusion_matrix(pred, y_test)) | gpl-3.0 |
pianomania/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while staying the same across tasks. The multi-task lasso
imposes that features selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
PBGraff/SwiftGRB_PEanalysis | pp_pipeline.py | 1 | 3476 | import numpy as np
import os
import triangle
import matplotlib.pyplot as plt
import scipy.stats
from matplotlib.ticker import MultipleLocator
def PlotTriangle(fileroot,usetruths=True):
data = np.loadtxt(fileroot+'post_equal_weights.dat', usecols=(0,1,2,3,4,5))
if (usetruths):
truths = np.loadtxt(fileroot+'injected_values.txt')
figure = triangle.corner(data, labels=names, truths=truths, bins=30, quantiles=[0.05, 0.5, 0.95],
show_titles=True, title_args={"fontsize": 12})
else:
figure = triangle.corner(data, labels=names, bins=30, quantiles=[0.05, 0.5, 0.95],
show_titles=True, title_args={"fontsize": 12})
figure.savefig(fileroot+'posterior_plot.png')
def FindRecoveryCDF(fileroot):
CDFvals = np.zeros(6)
data = np.loadtxt(fileroot+'post_equal_weights.dat', usecols=(0,1,2,3,4,5))
truths = np.loadtxt(fileroot+'injected_values.txt')
for i in range(6):
CDFvals[i] = np.sum(data[:,i]<truths[i]) * 1.0 / data.shape[0]
return CDFvals
def PP_KStest(cdf):
D, p = scipy.stats.kstest(cdf, 'uniform')
return p
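# Sanity-check sketch (illustrative only): for an unbiased pipeline the
# recovered CDF values should be uniform on [0, 1], so the KS p-value is
# typically large for uniform draws and small for skewed ones, e.g.
#   PP_KStest(np.random.uniform(size=500))       # usually well above 0.05
#   PP_KStest(np.random.uniform(size=500) ** 3)  # usually close to 0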
def PP_Plot(idx,cdf):
x = np.linspace(0.0,1.0,num=opts.number+1)
y = np.zeros(opts.number+1)
y[1:] = np.sort(cdf)
fig, ax = plt.subplots(1)
ax.plot(x,x,'--k',lw=2)
ax.plot(y,x,'-b',lw=2)
ax.axis([0,1,0,1])
ax.set_xlabel(r'$p$')
ax.set_ylabel(r'$\Pr(p)$')
ax.set_title(names[idx]+r' KS p-value = %.4lf'%(PP_KStest(cdf)))
ax.xaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.grid()
#plt.show()
fig.savefig(opts.outdir+'/pp_plot_'+names2[idx]+'.png')
def RunAnalysis(i):
root = opts.outdir+'/run'+str(i)+'_'
cmd = './Analysis --seed='+str(seeds[i+1])+' --nlive='+str(opts.nlive)+' --varyz1 --silent --outfile='+root
cmd += ' --n0='+str(n0_inj[i])+' --n1='+str(n1_inj[i])+' --n2='+str(n2_inj[i])+' --z1='+str(z1_inj[i])
if (opts.resume):
cmd += ' --resume'
print cmd
# run the analysis
os.system(cmd)
# plot posterior results
PlotTriangle(root)
# find recovered CDF for injected values
recoveredCDFvals[i,:] = FindRecoveryCDF(root)
from optparse import OptionParser
parser=OptionParser()
parser.add_option("-n","--number",action="store",type="int",default=100,help="Number of analyses to run")
parser.add_option("-o","--outdir",action="store",type="string",default="chains",help="Directory for analyses' output")
parser.add_option("-l","--nlive",action="store",type="int",default=1000,help="Number of live points to use")
parser.add_option("-r","--resume",action="store_true",default=False,help="Resume previous run")
(opts,args)=parser.parse_args()
seeds = np.random.random_integers(10,30000,opts.number+1)
recoveredCDFvals = np.zeros((opts.number,6))
names = [r'$n_0$', r'$n_1$', r'$n_2$', r'$z_1$', r'$n_{\ast}$', r'$n_{\rm tot}$']
names2 = ['n0', 'n1', 'n2', 'z1', 'nstar', 'ntot']
if not os.path.exists(opts.outdir):
os.makedirs(opts.outdir)
# run prior sampling
cmd = './Analysis --seed='+str(seeds[0])+' --nlive='+str(opts.number)+' --outfile='+opts.outdir+'/prior_ --varyz1 --zeroLogLike --silent'
if (opts.resume):
cmd = cmd + ' --resume'
print cmd
os.system(cmd)
# retrieve prior samples for injecting
prior_samples = np.loadtxt(opts.outdir+'/prior_post_equal_weights.dat')
n0_inj = prior_samples[:,0]
n1_inj = prior_samples[:,1]
n2_inj = prior_samples[:,2]
z1_inj = prior_samples[:,3]
# loop to perform all analyses
for i in range(opts.number):
RunAnalysis(i)
# make P-P plots
for i in range(6):
PP_Plot(i,recoveredCDFvals[:,i])
| apache-2.0 |
zuku1985/scikit-learn | examples/manifold/plot_manifold_sphere.py | 31 | 5118 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space. Unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with
`map projection <https://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
s0hvaperuna/Not-a-bot | cogs/jojo.py | 1 | 22891 | """
MIT License
Copyright (c) 2017 s0hvaperuna
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
import asyncio
import logging
import os
import sys
from collections import OrderedDict
from functools import partial
from io import BytesIO
from itertools import zip_longest
from threading import Lock
import discord
import numpy as np
from PIL import Image, ImageFont
from colour import Color
from discord.ext.commands import BucketType
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon, Circle
from numpy import pi, random
from bot.bot import command, cooldown, bot_has_permissions
from cogs.cog import Cog
from utils.imagetools import (create_shadow, create_text,
create_geopattern_background, shift_color,
remove_background,
resize_keep_aspect_ratio, get_color,
IMAGES_PATH, image_from_url, GeoPattern,
color_distance, MAX_COLOR_DIFF)
from utils.utilities import (get_picture_from_msg, y_n_check,
check_negative, normalize_text,
get_image, basic_check, test_url)
terminal = logging.getLogger('terminal')
HALFWIDTH_TO_FULLWIDTH = str.maketrans(
'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&()*+,-./:;<=>?@[]^_`{|}~ ',
'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!゛#$%&()*+、ー。/:;〈=〉?@[]^_‘{|}~ ')
LETTERS_TO_INT = {k: idx for idx, k in enumerate(['A', 'B', 'C', 'D', 'E'])}
INT_TO_LETTER = ['A', 'B', 'C', 'D', 'E']
POWERS = ['power', 'speed', 'range', 'durability', 'precision', 'potential']
class ArgumentParser(argparse.ArgumentParser):
def _get_action_from_name(self, name):
"""Given a name, get the Action instance registered with this parser.
If only it were made available in the ArgumentError object. It is
passed as its first arg...
"""
container = self._actions
if name is None:
return None
for action in container:
if '/'.join(action.option_strings) == name:
return action
elif action.metavar == name:
return action
elif action.dest == name:
return action
def error(self, message):
exc = sys.exc_info()[1]
if exc:
exc.argument = self._get_action_from_name(exc.argument_name)
raise exc
super(ArgumentParser, self).error(message)
class JoJo(Cog):
def __init__(self, bot):
super().__init__(bot)
self.stat_lock = Lock()
self.stats = OrderedDict.fromkeys(POWERS, None)
self.stat_spread_figure = plt.figure()
self.line_points = [1 - 0.2*i for i in range(6)]
self.parser = ArgumentParser()
args = ['-blur', '-canny_thresh_1', '-canny_thresh_2', '-mask_dilate_iter', '-mask_erode_iter']
for arg in args:
self.parser.add_argument(arg, type=int, default=argparse.SUPPRESS,
required=False)
def cog_unload(self): # skipcq: PYL-R0201
plt.close('all')
def create_empty_stats_circle(self, color='k'):
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(6):
power = POWERS[i]
rot = 60 * i / 180 * pi # Lines every 60 degrees
# Rotate the points in the line rot degrees
x = list(map(lambda x: x * np.sin(rot), self.line_points))
y = list(map(lambda y: y * np.cos(rot), self.line_points))
line = ax.plot(x, y, '-', color=color, alpha=0.6, markersize=6,
marker=(2, 0, 360 - 90 - 60 * i))
if i == 0:
x, y = line[0].get_data()
# Shift the letters so the are not on top of the line
correctionx = 0.15
correctiony = 0.05
for l, idx in LETTERS_TO_INT.items():
ax.text(x[idx] + correctionx, y[idx] - correctiony, l,
horizontalalignment='right', color=color, alpha=0.65,
fontsize=10)
self.stats[power] = line
return fig, ax
def create_stats_circle(self, color='b', bg_color=None, **kwargs):
c = 'black'
if color_distance(Color(c), bg_color) < (MAX_COLOR_DIFF/2):
c = 'white'
inner_circle = Circle((0, 0), radius=1.1, fc='none', ec=c)
outer_circle = Circle((0, 0), radius=1.55, fc='none', ec=c)
outest_circle = Circle((0, 0), radius=1.65, fc='none', ec=c)
fig, ax = self.create_empty_stats_circle(c)
stat_spread = []
for idx, line in enumerate(self.stats.values()):
x, y = line[0].get_data()
power = POWERS[idx]
power_value = kwargs.get(power, 'E')
if power_value is None:
power_value = 'E'
power_value = power_value.upper()
power_int = LETTERS_TO_INT.get(power_value, 0)
# Small correction to the text position
correction = 0.03
r = 60 * idx / 180 * pi
sinr = np.round(np.sin(r), 5)
cosr = np.round(np.cos(r), 5)
if sinr < 0:
lx = 1.25 * sinr - correction
else:
lx = 1.25 * sinr + correction
if cosr < 0:
ly = 1.25 * cosr - correction
else:
ly = 1.25 * cosr + correction
rot = (0 + min(check_negative(cosr) * 180, 0)) - 60 * idx
if sinr == 0:
rot = 0
ax.text(lx, ly, power_value, color=c, alpha=0.9, fontsize=14,
weight='bold', ha='center', va='center')
ax.text(lx * 1.50, ly * 1.50, power, color=c, fontsize=17,
ha='center', rotation=rot, va='center')
x = x[power_int]
y = y[power_int]
stat_spread.append([x, y])
r1 = outer_circle.radius
r2 = outest_circle.radius
w = 3.0
for r in range(0, 360, 15):
sinr = np.round(np.sin(np.deg2rad(r)), 5)
cosr = np.round(np.cos(np.deg2rad(r)), 5)
x = (r1*sinr, r2*sinr)
y = (r1*cosr, r2*cosr)
ax.plot(x, y, '-', color=c, linewidth=w)
pol = Polygon(stat_spread, fc='y', alpha=0.7)
pol.set_color(color)
fig.gca().add_patch(inner_circle)
fig.gca().add_patch(outer_circle)
fig.gca().add_patch(outest_circle)
fig.gca().add_patch(pol)
fig.gca().autoscale(True)
fig.gca().set_axis_off()
ax.axis('scaled')
fig.canvas.draw()
return fig, ax
@staticmethod
def _standify_text(s, type_=0):
types = ['『』', '「」', '']
bracket = types[type_]
s = normalize_text(s)
s = s.translate(HALFWIDTH_TO_FULLWIDTH)
if type_ > 1:
return s
s = bracket[0] + s + bracket[1]
return s
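# Illustrative note: the translation table above maps ASCII to fullwidth
# characters before the brackets are applied, so (outputs assumed from the
# mapping, not taken from a live run):
#   _standify_text("Jotaro")    -> '『Ｊｏｔａｒｏ』'
#   _standify_text("Jotaro", 1) -> '「Ｊｏｔａｒｏ」'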
@staticmethod
def pattern_check(msg):
return msg.content.lower() in GeoPattern.available_generators
@command(aliases=['stand'])
async def standify(self, ctx, *, stand):
"""Standify text using these brackets 『』"""
stand = self._standify_text(stand)
await ctx.send(stand)
@command(aliases=['stand2'])
async def standify2(self, ctx, *, stand):
"""Standify text using these brackets 「」"""
stand = self._standify_text(stand, 1)
await ctx.send(stand)
@command(aliases=['stand3'])
async def standify3(self, ctx, *, stand):
"""Standify text using no brackets"""
stand = self._standify_text(stand, 2)
await ctx.send(stand)
async def subcommand(self, ctx, content, delete_after=60, author=None, channel=None, check=None, del_msg=True):
m = await ctx.send(content, delete_after=delete_after)
if callable(check):
def _check(msg):
return check(msg) and basic_check(author, channel)
else:
_check = basic_check(author, channel)
try:
msg = await self.bot.wait_for('message', check=_check, timeout=delete_after)
except asyncio.TimeoutError:
msg = None
if del_msg:
try:
await m.delete()
except discord.HTTPException:
pass
return msg
@command(aliases=['stand_generator', 'standgen'])
@cooldown(1, 10, BucketType.user)
@bot_has_permissions(attach_files=True)
async def stand_gen(self, ctx, stand, user, image=None, *, params=None):
"""Generate a stand card. Arguments are stand name, user name and an image
Image can be an attachment or a link. Passing -advanced as the last argument
will enable advanced mode which gives the ability to tune some numbers.
Use quotes for names that have spaces e.g.
`{prefix}{name} "Star Platinum" "Jotaro Kujo" [image]`
You can also answer all parameters in one command by adding the parameters on their own lines.
It works something like this
```
{prefix}{name} "stand" "user"
A A A A A A
background_image.png
triangles
n
```
You can make the bot ask you the parameters by setting it just as an empty line. e.g.
```
A A A A A A
triangles
```
Would make the bot sak the background from you.
Here is a tree of the questions being asked.
If some answers lead to extra question they are indented to make it clear
1. Stats from A to E in the order power, speed, range, durability, precision, potential
2. Link to the background. Leave empty if you want a randomized pattern
2.......a) The geopattern and bg color separated by space. If either is left out it will be selected randomly
3. Do you want automatic background removal. Recommended answer is no. Typing y or yes will use it
"""
author = ctx.author
channel = ctx.channel
stand = self._standify_text(stand, 2)
user = '[STAND MASTER]\n' + user
stand = '[STAND NAME]\n' + stand
size = (1100, 700)
shift = 800
advanced = False
if image is None:
pass
elif test_url(image):
pass
else:
params = params if params else ''
params = image + params
image = None
if params:
params = params.strip().split('\n')
advanced = params[-1].strip()
if advanced.endswith('-advanced'):
advanced = advanced[:-9].strip()
if advanced:
params[-1] = advanced
else:
params.pop(-1)
advanced = True
else:
advanced = False
else:
params = []
if advanced:
await ctx.send(f'{author} Advanced mode activated', delete_after=20)
image = await get_image(ctx, image)
if not image:
return
def get_next_param():
try:
return params.pop(0)
except IndexError:
return
stats = get_next_param()
if not stats:
msg = await self.subcommand(ctx,
'{} Give the stand **stats** in the given order ranging from **A** to **E** '
'separated by **spaces**.\nDefault value is E\n`{}`'.format(author, '` `'.join(POWERS)),
delete_after=120, author=author, channel=channel)
if msg is None:
await ctx.send(f'Timed out. {author} cancelling stand generation')
return
stats = msg.content
stats = stats.split(' ')
stats = dict(zip_longest(POWERS, stats[:6]))
bg = get_next_param()
if not bg:
msg = await self.subcommand(ctx,
f'{author}`Use a custom background by uploading a **picture** or using a **link**. '
'Posting something other than an image will use the **generated background**',
delete_after=120, author=author, channel=channel)
bg = get_picture_from_msg(msg)
else:
if not test_url(bg):
bg = None
color = None
if bg is not None:
try:
bg = bg.strip()
bg = await image_from_url(bg, self.bot.aiohttp_client)
def process_bg():
nonlocal bg, color
bg = bg.convert('RGB')
dominant_color = get_color(bg)
color = Color(rgb=list(map(lambda c: c/255, dominant_color)))
bg = resize_keep_aspect_ratio(bg, size, crop_to_size=True)
return color, bg
color, bg = await self.bot.loop.run_in_executor(self.bot.threadpool, process_bg)
except Exception:
terminal.exception('Failed to get background')
await ctx.send(f'{author} Failed to use custom background. Using generated one',
delete_after=60)
bg = None
if bg is None:
pattern = random.choice(GeoPattern.available_generators)
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
"{} Generating background. Select a **pattern** and **color** separated by space. "
"Otherwise they'll will be randomly chosen. Available patterns:\n"
'{}'.format(author, '\n'.join(GeoPattern.available_generators)),
delete_after=120, channel=channel, author=author)
msg = msg.content if msg else ''
if not msg:
await ctx.send('{} Selecting randomly'.format(author), delete_after=20)
else:
msg = msg.split(' ')
# Temporary containers for pattern and color
_pattern, _color = None, None
if len(msg) == 1:
_pattern = msg[0]
elif len(msg) > 1:
_pattern, _color = msg[:2]
if _pattern in GeoPattern.available_generators:
pattern = _pattern
else:
await ctx.send('{} Pattern {} not found. Selecting randomly'.format(author, _pattern),
delete_after=20)
if _color:
try:
color = Color(_color)
except (ValueError, AttributeError):
await ctx.send('{} {} not an available color'.format(author, _color),
delete_after=20)
def do_bg():
return create_geopattern_background(size, stand + user,
generator=pattern, color=color)
bg, color = await self.bot.loop.run_in_executor(self.bot.threadpool, do_bg)
if advanced:
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
'{} Input color value change as an **integer**. Default is {}. '
'You can also input a **color** instead of the change value. '
'The resulting color will be used in the stats circle'.format(author, shift),
delete_after=120, channel=channel, author=author)
msg = msg.content if msg else ''
try:
shift = int(msg.strip())
except ValueError:
try:
color = Color(msg)
shift = 0
except (ValueError, AttributeError):
await ctx.send(f'{author} Could not set color or color change int. Using default values',
delete_after=15)
try:
if not isinstance(color, str):
color = Color(color.get_hex_l())
bg_color = Color(color)
except (AttributeError, ValueError):
terminal.exception(f'Failed to set bg color from {color}')
return await ctx.send('Failed to set bg color')
def do_stuff():
# Shift color hue and saturation so it's not the same as the bg
shift_color(color, shift)
fig, _ = self.create_stats_circle(color=color.get_hex_l(), bg_color=bg_color, **stats)
path = os.path.join(IMAGES_PATH, 'stats.png')
with self.stat_lock:
try:
fig.savefig(path, transparent=True)
stat_img = Image.open(path)
except:
terminal.exception('Could not create image')
return '{} Could not create picture because of an error.'.format(author)
plt.close(fig)
stat_img = stat_img.resize((int(stat_img.width * 0.85),
int(stat_img.height * 0.85)),
Image.BILINEAR)
full = Image.new('RGBA', size)
# Coords for stat circle
x, y = (-60, full.height - stat_img.height)
stat_corner = (x + stat_img.width, y + stat_img.height)
full.paste(stat_img, (x, y, *stat_corner))
font = ImageFont.truetype(os.path.join('M-1c', 'mplus-1c-bold.ttf'), 40)
# Small glow blur can be created with create_glow and setting amount to 1 or lower
text = create_text(stand, font, '#FFFFFF', (int(full.width*0.75), int(y*0.8)), (10, 10))
text = create_shadow(text, 80, 3, 2, 4).convert('RGBA')
full.paste(text, (20, 20), text)
text2 = create_text(user, font, '#FFFFFF', (int((full.width - stat_corner[0])*0.8), int(full.height*0.7)), (10, 10))
text2 = create_shadow(text2, 80, 3, 2, 4).convert('RGBA')
text2.load()
return full, stat_corner, text2
res = await self.bot.loop.run_in_executor(self.bot.threadpool, do_stuff)
if isinstance(res, str):
return await ctx.send(res)
full, stat_corner, text2 = res
if image is not None:
# No clue what this does so leaving it out
#im = trim_image(image)
im = image
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
f'{author} Try to automatically remove background (y/n)? '
'This might fuck the picture up and will take a moment',
author=author, channel=channel, delete_after=120, check=y_n_check)
msg = msg.content if msg else ''
if msg and msg.lower() in ['y', 'yes']:
kwargs = {}
if advanced:
msg = get_next_param()
if not msg:
msg = await self.subcommand(ctx,
f'{author} Change the arguments of background removing. Available'
' arguments are `blur`, `canny_thresh_1`, `canny_thresh_2`, '
'`mask_dilate_iter`, `mask_erode_iter`. '
'Accepted values are integers.\nArguments are added like this '
'`-blur 30 -canny_thresh_2 50`. All arguments are optional',
channel=channel, author=author, delete_after=140)
msg = msg.content if msg else ''
await channel.trigger_typing()
if msg is not None:
try:
kwargs = self.parser.parse_known_args(msg.split(' '))[0].__dict__
except (SystemExit, IndexError, AttributeError):
await ctx.send(f'{author} Could not get arguments from {msg}',
delete_after=20)
try:
im = await self.bot.loop.run_in_executor(self.bot.threadpool, partial(remove_background, im, **kwargs))
except Exception:
terminal.exception('Failed to remove bg from image')
await ctx.send(f'{author} Could not remove background because of an error',
delete_after=30)
def resize_image():
nonlocal im
# Size of user pic
box = (500, 600)
im = resize_keep_aspect_ratio(im, box, can_be_bigger=False, resample=Image.BICUBIC)
im = create_shadow(im, 70, 3, -22, -7).convert('RGBA')
full.paste(im, (full.width - im.width, int((full.height - im.height)/2)), im)
await self.bot.loop.run_in_executor(self.bot.threadpool, resize_image)
await channel.trigger_typing()
def finalize_image():
full.paste(text2, (int((full.width - stat_corner[0]) * 0.9), int(full.height * 0.7)), text2)
bg.paste(full, (0, 0), full)
file = BytesIO()
bg.save(file, format='PNG')
file.seek(0)
return file
file = await self.bot.loop.run_in_executor(self.bot.threadpool, finalize_image)
await ctx.send(file=discord.File(file, filename='stand_card.png'))
def setup(bot):
bot.add_cog(JoJo(bot))
| mit |
simon-anders/htseq | setup.py | 1 | 7870 | #!/usr/bin/env python
from __future__ import print_function
import sys
import os
from distutils.log import INFO as logINFO
if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or
(sys.version_info[0] == 3 and sys.version_info[1] < 5)):
sys.stderr.write("Error in setup script for HTSeq:\n")
sys.stderr.write("HTSeq support Python 2.7 or 3.5+.")
sys.exit(1)
# Manage python2/3 compatibility with symlinks
py_maj = sys.version_info[0]
py_fdn = 'python'+str(py_maj)+'/'
print('symlinking folders for python'+str(py_maj))
for fdn in ['src', 'HTSeq', 'doc', 'scripts', 'test']:
if os.path.islink(fdn):
os.unlink(fdn)
os.symlink(py_fdn+fdn, fdn)
# Update version from VERSION file into module
with open('VERSION') as fversion:
version = fversion.readline().rstrip()
with open(py_fdn+'HTSeq/_version.py', 'wt') as fversion:
fversion.write('__version__ = "'+version+'"')
# Check OS-specific quirks
try:
from setuptools import setup, Extension
from setuptools.command.build_py import build_py
from setuptools import Command
# Setuptools but not distutils support build/runtime/optional dependencies
# NOTE: setuptools < 18.0 has issues with Cython as a dependency
# NOTE: old setuptools < 18.0 has issues with extras
kwargs = dict(
setup_requires=[
'Cython',
'numpy',
'pysam>=0.9.0',
],
install_requires=[
'numpy',
'pysam>=0.9.0',
],
extras_require={
'htseq-qa': ['matplotlib>=1.4']
},
)
except ImportError:
sys.stderr.write("Could not import 'setuptools'," +
" falling back to 'distutils'.\n")
from distutils.core import setup, Extension
from distutils.command.build_py import build_py
from distutils.cmd import Command
kwargs = dict(
requires=[
'Cython',
'numpy',
'pysam>=0.9.0',
]
)
try:
import numpy
except ImportError:
sys.stderr.write("Setup script for HTSeq: Failed to import 'numpy'.\n")
sys.stderr.write("Please install numpy and then try again to install" +
" HTSeq.\n")
sys.exit(1)
numpy_include_dir = os.path.join(
os.path.dirname(numpy.__file__),
'core',
'include',
)
def get_include_dirs(cpp=False):
'''OSX 10.14 and later split the /usr/include contents everywhere'''
include_dirs = []
if sys.platform != 'darwin':
return include_dirs
paths = {
'C': [
'/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/',
],
'C++': [
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1/',
],
}
for path in paths['C']:
if os.path.isdir(path):
include_dirs.append(path)
if cpp:
for path in paths['C++']:
if os.path.isdir(path):
include_dirs.append(path)
return include_dirs
def get_library_dirs_cpp():
'''OSX 10.14 and later messed up C/C++ library locations'''
if sys.platform == 'darwin':
return ['/usr/X11R6/lib']
else:
return []
def get_extra_args_cpp():
'''OSX 10.14 and later refuses to use libstdc++'''
if sys.platform == 'darwin':
return ['-stdlib=libc++']
else:
return []
class Preprocess_command(Command):
'''Cython and SWIG preprocessing'''
description = "preprocess Cython and SWIG files for HTSeq"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.swig_and_cython()
def swig_and_cython(self):
import os
from shutil import copy
from subprocess import check_call
if py_maj == 2:
from subprocess import CalledProcessError as SubprocessError
else:
from subprocess import SubprocessError
def c(x): return check_call(x, shell=True)
def p(x): return self.announce(x, level=logINFO)
# CYTHON
p('cythonizing')
cython = os.getenv('CYTHON', 'cython')
try:
c(cython+' --version')
except SubprocessError:
if os.path.isfile('src/_HTSeq.c'):
p('Cython not found, but transpiled file found')
else:
raise
else:
if py_maj == 2:
c(cython+' '+py_fdn+'src/HTSeq/_HTSeq.pyx -o '+py_fdn+'src/_HTSeq.c')
else:
c(cython+' -3 '+py_fdn+'src/HTSeq/_HTSeq.pyx -o '+py_fdn+'src/_HTSeq.c')
# SWIG
p('SWIGging')
swig = os.getenv('SWIG', 'swig')
pyswigged = py_fdn+'src/StepVector.py'
try:
if py_maj == 2:
c(swig+' -Wall -c++ -python '+py_fdn+'src/StepVector.i')
else:
p('correcting SWIG for python3')
c("2to3 --no-diffs --write --nobackups "+pyswigged)
c("sed -i 's/ import builtins as __builtin__/ import builtins/' "+pyswigged)
c("sed -i 's/\.next/.__next__/' "+pyswigged)
except SubprocessError:
if (os.path.isfile(py_fdn+'src/StepVector_wrap.cxx') and
os.path.isfile(py_fdn+'src/StepVector.py')):
p('SWIG not found, but transpiled files found')
else:
raise
p('moving swigged .py module')
copy(pyswigged, py_fdn+'HTSeq/StepVector.py')
p('done')
class Build_with_preprocess(build_py):
def run(self):
self.run_command('preprocess')
build_py.run(self)
setup(name='HTSeq',
version=version,
author='Simon Anders, Fabio Zanini',
author_email='sanders@fs.tum.de',
maintainer='Fabio Zanini',
maintainer_email='fabio.zanini@stanford.edu',
url='https://github.com/simon-anders/htseq',
description="A framework to process and analyze data from " +
"high-throughput sequencing (HTS) assays",
long_description="""
A framework to process and analyze data from high-throughput sequencing
(HTS) assays.
Development: https://github.com/simon-anders/htseq
Documentation: http://htseq.readthedocs.io""",
license='GPL3',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Programming Language :: Python'
],
ext_modules=[
Extension(
'HTSeq._HTSeq',
['src/_HTSeq.c'],
include_dirs=[numpy_include_dir]+get_include_dirs(),
extra_compile_args=['-w']),
Extension(
'HTSeq._StepVector',
['src/StepVector_wrap.cxx'],
include_dirs=get_include_dirs(cpp=True),
library_dirs=get_library_dirs_cpp(),
extra_compile_args=['-w'] + get_extra_args_cpp(),
extra_link_args=get_extra_args_cpp(),
),
],
py_modules=[
'HTSeq._HTSeq_internal',
'HTSeq.StepVector',
'HTSeq._version',
'HTSeq.scripts.qa',
'HTSeq.scripts.count',
'HTSeq.scripts.count_with_barcodes',
],
scripts=[
'scripts/htseq-qa',
'scripts/htseq-count',
'scripts/htseq-count-barcodes',
],
cmdclass={
'preprocess': Preprocess_command,
'build_py': Build_with_preprocess,
},
**kwargs
)
| gpl-3.0 |
dsm054/pandas | pandas/tests/indexes/interval/test_interval_range.py | 1 | 12905 | from __future__ import division
from datetime import timedelta
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import (
DateOffset, Interval, IntervalIndex, Timedelta, Timestamp, date_range,
interval_range, timedelta_range
)
from pandas.core.dtypes.common import is_integer
from pandas.tseries.offsets import Day
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalRange(object):
@pytest.mark.parametrize('freq, periods', [
(1, 100), (2.5, 40), (5, 20), (25, 4)])
def test_constructor_numeric(self, closed, name, freq, periods):
start, end = 0, 100
breaks = np.arange(101, step=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
# defined from start/end/freq
result = interval_range(
start=start, end=end, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from start/periods/freq
result = interval_range(
start=start, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from end/periods/freq
result = interval_range(
end=end, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
result = interval_range(
start=start, end=end, periods=periods, name=name, closed=closed)
tm.assert_index_equal(result, expected)
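    # Rough illustration of the numeric case above (assuming the default
    # closed='right'):
    #
    #   >>> from pandas import interval_range
    #   >>> idx = interval_range(start=0, end=100, freq=25)
    #   >>> list(idx.right)
    #   [25, 50, 75, 100]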
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
@pytest.mark.parametrize('freq, periods', [
('D', 364), ('2D', 182), ('22D18H', 16), ('M', 11)])
def test_constructor_timestamp(self, closed, name, freq, periods, tz):
start, end = Timestamp('20180101', tz=tz), Timestamp('20181231', tz=tz)
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
# defined from start/end/freq
result = interval_range(
start=start, end=end, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from start/periods/freq
result = interval_range(
start=start, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from end/periods/freq
result = interval_range(
end=end, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
if not breaks.freq.isAnchored() and tz is None:
# matches expected only for non-anchored offsets and tz naive
# (anchored/DST transitions cause unequal spacing in expected)
result = interval_range(start=start, end=end, periods=periods,
name=name, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq, periods', [
('D', 100), ('2D12H', 40), ('5D', 20), ('25D', 4)])
def test_constructor_timedelta(self, closed, name, freq, periods):
start, end = Timedelta('0 days'), Timedelta('100 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
# defined from start/end/freq
result = interval_range(
start=start, end=end, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from start/periods/freq
result = interval_range(
start=start, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# defined from end/periods/freq
result = interval_range(
end=end, periods=periods, freq=freq, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# GH 20976: linspace behavior defined from start/end/periods
result = interval_range(
start=start, end=end, periods=periods, name=name, closed=closed)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('start, end, freq, expected_endpoint', [
(0, 10, 3, 9),
(0, 10, 1.5, 9),
(0.5, 10, 3, 9.5),
(Timedelta('0D'), Timedelta('10D'), '2D4H', Timedelta('8D16H')),
(Timestamp('2018-01-01'),
Timestamp('2018-02-09'),
'MS',
Timestamp('2018-02-01')),
(Timestamp('2018-01-01', tz='US/Eastern'),
Timestamp('2018-01-20', tz='US/Eastern'),
'5D12H',
Timestamp('2018-01-17 12:00:00', tz='US/Eastern'))])
def test_early_truncation(self, start, end, freq, expected_endpoint):
# index truncates early if freq causes end to be skipped
result = interval_range(start=start, end=end, freq=freq)
result_endpoint = result.right[-1]
assert result_endpoint == expected_endpoint
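    # For the first parametrized case above, interval_range(start=0, end=10,
    # freq=3) produces breaks 0, 3, 6, 9, so the final interval is (6, 9] and
    # end=10 is dropped rather than overshot, which is what
    # result.right[-1] == 9 asserts.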
@pytest.mark.parametrize('start, end, freq', [
(0.5, None, None),
(None, 4.5, None),
(0.5, None, 1.5),
(None, 6.5, 1.5)])
def test_no_invalid_float_truncation(self, start, end, freq):
# GH 21161
if freq is None:
breaks = [0.5, 1.5, 2.5, 3.5, 4.5]
else:
breaks = [0.5, 2.0, 3.5, 5.0, 6.5]
expected = IntervalIndex.from_breaks(breaks)
result = interval_range(start=start, end=end, periods=4, freq=freq)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('start, mid, end', [
(Timestamp('2018-03-10', tz='US/Eastern'),
Timestamp('2018-03-10 23:30:00', tz='US/Eastern'),
Timestamp('2018-03-12', tz='US/Eastern')),
(Timestamp('2018-11-03', tz='US/Eastern'),
Timestamp('2018-11-04 00:30:00', tz='US/Eastern'),
Timestamp('2018-11-05', tz='US/Eastern'))])
def test_linspace_dst_transition(self, start, mid, end):
# GH 20976: linspace behavior defined from start/end/periods
# accounts for the hour gained/lost during DST transition
result = interval_range(start=start, end=end, periods=2)
expected = IntervalIndex.from_breaks([start, mid, end])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq', [2, 2.0])
@pytest.mark.parametrize('end', [10, 10.0])
@pytest.mark.parametrize('start', [0, 0.0])
def test_float_subtype(self, start, end, freq):
# Has float subtype if any of start/end/freq are float, even if all
# resulting endpoints can safely be upcast to integers
# defined from start/end/freq
index = interval_range(start=start, end=end, freq=freq)
result = index.dtype.subtype
expected = 'int64' if is_integer(start + end + freq) else 'float64'
assert result == expected
# defined from start/periods/freq
index = interval_range(start=start, periods=5, freq=freq)
result = index.dtype.subtype
expected = 'int64' if is_integer(start + freq) else 'float64'
assert result == expected
# defined from end/periods/freq
index = interval_range(end=end, periods=5, freq=freq)
result = index.dtype.subtype
expected = 'int64' if is_integer(end + freq) else 'float64'
assert result == expected
# GH 20976: linspace behavior defined from start/end/periods
index = interval_range(start=start, end=end, periods=5)
result = index.dtype.subtype
expected = 'int64' if is_integer(start + end) else 'float64'
assert result == expected
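    # Concretely: interval_range(start=0, end=10, freq=2) keeps an int64
    # subtype, while interval_range(start=0, end=10, freq=2.0) is float64 even
    # though every endpoint is a whole number, matching the is_integer(...)
    # rule used to build `expected` above.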
def test_constructor_coverage(self):
# float value for periods
expected = interval_range(start=0, periods=10)
result = interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = interval_range(start=start, end=end)
result = interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = interval_range(start=start, end=end)
result = interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timedelta
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
for freq in equiv_freq:
result = interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the four parameters: start, end, periods, and freq, '
'exactly three must be specified')
with pytest.raises(ValueError, match=msg):
interval_range(start=0)
with pytest.raises(ValueError, match=msg):
interval_range(end=5)
with pytest.raises(ValueError, match=msg):
interval_range(periods=2)
with pytest.raises(ValueError, match=msg):
interval_range()
# too many params
with pytest.raises(ValueError, match=msg):
interval_range(start=0, end=5, periods=6, freq=1.5)
# mixed units
msg = 'start, end, freq need to be type compatible'
with pytest.raises(TypeError, match=msg):
interval_range(start=0, end=Timestamp('20130101'), freq=2)
with pytest.raises(TypeError, match=msg):
interval_range(start=0, end=Timedelta('1 day'), freq=2)
with pytest.raises(TypeError, match=msg):
interval_range(start=0, end=10, freq='D')
with pytest.raises(TypeError, match=msg):
interval_range(start=Timestamp('20130101'), end=10, freq='D')
with pytest.raises(TypeError, match=msg):
interval_range(start=Timestamp('20130101'),
end=Timedelta('1 day'), freq='D')
with pytest.raises(TypeError, match=msg):
interval_range(start=Timestamp('20130101'),
end=Timestamp('20130110'), freq=2)
with pytest.raises(TypeError, match=msg):
interval_range(start=Timedelta('1 day'), end=10, freq='D')
with pytest.raises(TypeError, match=msg):
interval_range(start=Timedelta('1 day'),
end=Timestamp('20130110'), freq='D')
with pytest.raises(TypeError, match=msg):
interval_range(start=Timedelta('1 day'),
end=Timedelta('10 days'), freq=2)
# invalid periods
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
interval_range(start=0, periods='foo')
# invalid start
msg = 'start must be numeric or datetime-like, got foo'
with pytest.raises(ValueError, match=msg):
interval_range(start='foo', periods=10)
# invalid end
msg = r'end must be numeric or datetime-like, got \(0, 1\]'
with pytest.raises(ValueError, match=msg):
interval_range(end=Interval(0, 1), periods=10)
# invalid freq for datetime-like
msg = 'freq must be numeric or convertible to DateOffset, got foo'
with pytest.raises(ValueError, match=msg):
interval_range(start=0, end=10, freq='foo')
with pytest.raises(ValueError, match=msg):
interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
with pytest.raises(ValueError, match=msg):
interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
# mixed tz
start = Timestamp('2017-01-01', tz='US/Eastern')
end = Timestamp('2017-01-07', tz='US/Pacific')
msg = 'Start and end cannot both be tz-aware with different timezones'
with pytest.raises(TypeError, match=msg):
interval_range(start=start, end=end)
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/internals.py | 1 | 169308 | import copy
import itertools
import re
import operator
from datetime import datetime, timedelta, date
from collections import defaultdict
import numpy as np
from numpy import percentile as _quantile
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE,
_TD_DTYPE, ABCSeries, is_list_like,
_infer_dtype_from_scalar, is_null_slice,
is_dtype_equal, is_null_datelike_scalar,
_maybe_promote, is_timedelta64_dtype,
is_datetime64_dtype, is_datetimetz, is_sparse,
array_equivalent, _is_na_compat,
_maybe_convert_string_to_object,
_maybe_convert_scalar,
is_categorical, is_datetimelike_v_numeric,
is_numeric_v_string_like, is_extension_type)
import pandas.core.algorithms as algos
from pandas.types.api import DatetimeTZDtype
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
from pandas.tseries.index import DatetimeIndex
from pandas.formats.printing import pprint_thing
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas.core.missing as missing
import pandas.core.convert as convert
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_datetimetz = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_box_to_block_values = True
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if len(self.mgr_locs) != len(self.values):
raise ValueError('Wrong number of items passed %d, placement '
'implies %d' % (len(self.values),
len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
    def is_datelike(self):
        """ return True if I am a datelike (datetime or timedelta) """
return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
        validate that we have a dtype that is astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def external_values(self, dtype=None):
""" return an outside world format, currently just the ndarray """
return self.values
def internal_values(self, dtype=None):
""" return an internal format, currently just the ndarray
this should be the pure internal API format
"""
return self.values
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
        this is often overridden to handle to_dense-like operations
"""
if com.is_object_dtype(dtype):
return self.values.astype(object)
return self.values
def to_dense(self):
return self.values.view()
def to_object_block(self, mgr):
""" return myself as an object block """
values = self.get_values(dtype=object)
return self.make_block(values, klass=ObjectBlock)
@property
def _na_value(self):
return np.nan
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return self.dtype
def make_block(self, values, placement=None, ndim=None, **kwargs):
"""
        Create a new block, with type inference; propagate any values that are
not specified
"""
if placement is None:
placement = self.mgr_locs
if ndim is None:
ndim = self.ndim
return make_block(values, placement=placement, ndim=ndim, **kwargs)
def make_block_same_class(self, values, placement=None, fastpath=True,
**kwargs):
""" Wrap given values in a block of same type as self. """
if placement is None:
placement = self.mgr_locs
return make_block(values, placement=placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (name, len(self), self.dtype)
else:
shape = ' x '.join([pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (name, pprint_thing(
self.mgr_locs.indexer), shape, self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(values=self.get_values().T,
placement=self.mgr_locs, shape=shape,
labels=labels, ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = algos.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return self.make_block(new_values, fastpath=True)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not
one
"""
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = self.make_block(values=_block_shape(result))
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
""" fillna on the block with the value. If we fail, then convert to
ObjectBlock and try again
"""
if not self._can_hold_na:
if inplace:
return self
else:
return self.copy()
original_value = value
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillna' "
"is currently limited to 2")
mask[mask.cumsum(self.ndim - 1) > limit] = False
# fillna, but if we cannot coerce, then try again as an ObjectBlock
try:
values, _, value, _ = self._try_coerce_args(self.values, value)
blocks = self.putmask(mask, value, inplace=inplace)
blocks = [b.make_block(values=self._try_coerce_result(b.values))
for b in blocks]
return self._maybe_downcast(blocks, downcast)
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
# we cannot coerce the underlying object, so
# make an ObjectBlock
return self.to_object_block(mgr=mgr).fillna(original_value,
limit=limit,
inplace=inplace,
downcast=False)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
return _extend_blocks([b.downcast(downcast) for b in blocks])
def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return self
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return self.make_block(nv, fastpath=True)
# ndim > 1
if dtypes is None:
return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
# TODO: This either should be completed or removed
dtype = dtypes.get(item, self._downcast_dtype) # noqa
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(self.make_block(nv, fastpath=True, placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None,
**kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise on an exception if raise_on_error == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return self.make_block(Categorical(self.values, **kwargs))
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
if issubclass(dtype.type,
(compat.text_type, compat.string_types)):
# use native type formatting for datetime/tz/timedelta
if self.is_datelike:
values = self.to_native_types()
# astype formatting
else:
values = self.values
else:
values = self.get_values(dtype=dtype)
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(values.ravel(), dtype, copy=True)
values = values.reshape(self.shape)
newb = make_block(values, placement=self.mgr_locs, dtype=dtype,
klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
                raise TypeError("cannot set astype for copy = [%s] for dtype "
                                "(%s [%s]) with smaller itemsize than current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types return a copy
of the block (if copy = True) by definition we are not an ObjectBlock
here!
"""
return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
        roundtripped through object in the meantime
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
if not self.is_object and not quoting:
values = values.astype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy()
return self.make_block_same_class(values)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility.
"""
original_to_replace = to_replace
mask = isnull(self.values)
# try to replace, if we raise an error, convert to ObjectBlock and
# retry
try:
values, _, to_replace, _ = self._try_coerce_args(self.values,
to_replace)
mask = missing.mask_missing(values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
blocks = self.putmask(mask, value, inplace=inplace)
if convert:
blocks = [b.convert(by_item=True, numeric=False,
copy=not inplace) for b in blocks]
return blocks
except (TypeError, ValueError):
# we can't process the value, but nothing to do
if not mask.any():
return self if inplace else self.copy()
return self.to_object_block(mgr=mgr).replace(
to_replace=original_to_replace, value=value, inplace=inplace,
filter=filter, regex=regex, convert=convert)
def _replace_single(self, *args, **kwargs):
""" no-op on a non-ObjectBlock """
return self if kwargs['inplace'] else self.copy()
def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, _, value, _ = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([lib.isscalar(idx) for idx in indexer])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0
for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could
# be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif (len(arr_value.shape) and
arr_value.shape[0] == values.shape[0] and
np.prod(arr_value.shape) == np.prod(values.shape)):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
value.dtype):
dtype = value.dtype
elif lib.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = self.make_block(transf(values), fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except ValueError:
raise
except TypeError:
# cast to the passed dtype if possible
# otherwise raise the original error
try:
# e.g. we are uint32 and our value is uint64
# this is for compat with older numpies
block = self.make_block(transf(values.astype(value.dtype)))
return block.setitem(indexer=indexer, value=value, mgr=mgr)
except:
pass
raise
except Exception:
pass
return [self]
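    # Sketch of the promotion path above: assigning a value the current dtype
    # cannot hold (e.g. np.nan into an integer block) triggers _maybe_promote,
    # so the stored values are upcast (int64 -> float64 in that case) before
    # the indexer assignment, and the returned block may have a new dtype.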
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a list of new blocks, the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
if hasattr(new, 'reindex_axis'):
new = new.values
if hasattr(mask, 'reindex_axis'):
mask = mask.values
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
if transpose:
new_values = new_values.T
new = self._try_cast(new)
# If the default repeat behavior in np.putmask would go in the
            # wrong direction, then explicitly repeat and reshape new instead
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim and axis == 1:
new = np.repeat(
new, new_values.shape[-1]).reshape(self.shape)
new = new.astype(new_values.dtype)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
if transpose:
mask = mask.T
if isinstance(new, np.ndarray):
new = new.T
axis = new_values.ndim - axis - 1
# Pseudo-broadcast
if getattr(new, 'ndim', 0) >= 1:
if self.ndim - 1 == new.ndim:
new_shape = list(new.shape)
new_shape.insert(axis, 1)
new = new.reshape(tuple(new_shape))
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
if isinstance(new, np.ndarray):
n = np.squeeze(new[i % new.shape[0]])
else:
n = np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to explicitly astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = self.make_block(values=nv[np.newaxis],
placement=[ref_loc], fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(self.make_block(values=nv, fastpath=True))
return new_blocks
if inplace:
return [self]
if transpose:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None, values=None,
inplace=False, limit=None, limit_direction='forward',
fill_value=None, coerce=False, downcast=None, mgr=None,
**kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = missing.clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m, axis=axis,
inplace=inplace, limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast, mgr=mgr)
# try an interp method
try:
m = missing.clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m, index=index, values=values,
axis=axis, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value, inplace=inplace,
downcast=downcast, mgr=mgr, **kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
values = missing.interpolate_2d(values, method=method, axis=axis,
limit=limit, fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values, klass=self.__class__, fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward', inplace=False, downcast=None,
mgr=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to missing.interpolate_1d
return missing.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [self.make_block(interp_values, klass=self.__class__,
fastpath=True)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
# algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock
# so need to preserve types
# sparse is treated like an ndarray, but needs .get_values() shaping
values = self.values
if self.is_sparse:
values = self.get_values()
if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if not is_dtype_equal(new_values.dtype, self.dtype):
return self.make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = algos.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values, fastpath=True)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods),
axis=axis)
axis_indexer = [slice(None)] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [self.make_block(new_values, fastpath=True)]
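    # Example of the upcast-and-fill above: an int64 block shifted by one
    # period comes back float64 with NaN in the vacated slot, e.g.
    # [1, 2, 3] -> [nan, 1.0, 2.0]; np.roll moves the data and the
    # axis_indexer slice overwrites the wrapped-around positions with
    # fill_value.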
def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
try_cast : try casting the results to the input type
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                    # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, values_mask, other, other_mask = self._try_coerce_args(
transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
            # avoid numpy warning of comparisons against None
if other is None:
result = not func.__name__ == 'eq'
# avoid numpy warning of elementwise comparisons to object
elif is_numeric_v_string_like(values, other):
result = False
else:
result = func(values, other)
# mask if needed
if isinstance(values_mask, np.ndarray) and values_mask.any():
result = result.astype('float64', copy=False)
result[values_mask] = np.nan
if other_mask is True:
result = result.astype('float64', copy=False)
result[:] = np.nan
elif isinstance(other_mask, np.ndarray) and other_mask.any():
result = result.astype('float64', copy=False)
result[other_mask.ravel()] = np.nan
return self._try_coerce_result(result)
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s' %
(repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values' %
repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [self.make_block(result, fastpath=True, )]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
axis : int
transpose : boolean
Set to True if self is stored with axes reversed
Returns
-------
a new block(s), the result of the func
"""
values = self.values
if transpose:
values = values.T
if hasattr(other, 'reindex_axis'):
other = other.values
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# If the default broadcasting would go in the wrong direction, then
        # explicitly reshape other instead
if getattr(other, 'ndim', 0) >= 1:
if values.ndim - 1 == other.ndim and axis == 1:
other = other.reshape(tuple(other.shape + (1, )))
if not hasattr(cond, 'shape'):
raise ValueError("where must have a condition that is ndarray "
"like")
other = _maybe_convert_string_to_object(other)
other = _maybe_convert_scalar(other)
# our where function
def func(cond, values, other):
if cond.ravel().all():
return values
values, values_mask, other, other_mask = self._try_coerce_args(
values, other)
try:
return self._try_coerce_result(expressions.where(
cond, values, other, raise_on_error=True))
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if transpose:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return self.make_block(result)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(result.take(m.nonzero()[0],
axis=axis))
result_blocks.append(
self.make_block(r.T, placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
return array_equivalent(self.values, other.values)
def quantile(self, qs, mgr=None, **kwargs):
"""
compute the quantiles of the
Parameters
----------
qs : a scalar or list of the quantiles to be computed
"""
values = self.get_values()
values, mask, _, _ = self._try_coerce_args(values, values)
if not lib.isscalar(mask) and mask.any():
values = values[~mask]
if len(values) == 0:
if com.is_list_like(qs):
result = np.array([self.fill_value])
else:
result = self._na_value
elif com.is_list_like(qs):
values = [_quantile(values, x * 100, **kwargs) for x in qs]
result = np.array(values)
else:
result = _quantile(values, qs * 100, **kwargs)
return self._try_coerce_result(result)
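    # Note: _quantile is numpy's percentile (see the module imports), which
    # expects a 0-100 scale, hence the qs * 100 above; for instance
    # np.percentile([1., 2., 3., 4.], 0.5 * 100) == 2.5.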
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a length.
self.mgr_locs = placement
# kludgetastic
if ndim is None:
if len(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not isinstance(values, self._holder):
raise TypeError("values must be {0}".format(self._holder.__name__))
self.values = values
@property
def shape(self):
if self.ndim == 1:
return (len(self.values)),
return (len(self.mgr_locs), len(self.values))
    def get_values(self, dtype=None):
        """ need to to_dense myself (and always return an ndim-sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def iget(self, col):
if self.ndim == 2 and isinstance(col, tuple):
col, loc = col
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values
def should_store(self, value):
return isinstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.tolist() == [0]
self.values = values
def get(self, item):
if self.ndim == 1:
loc = self.items.get_loc(item)
return self.values[loc]
else:
return self.values
def putmask(self, mask, new, align=True, inplace=False, axis=0,
transpose=False, mgr=None):
"""
putmask the data to the block; we must be a single block and not
generate other blocks
return the resulting block
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
new_values, _, new, _ = self._try_coerce_args(new_values, new)
if isinstance(new, np.ndarray) and len(new) == len(mask):
new = new[mask]
new_values[mask] = new
new_values = self._try_coerce_result(new_values)
return [self.make_block(values=new_values)]
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return (issubclass(tipo, (np.floating, np.integer)) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
return (isinstance(element, (float, int, np.float_, np.int_)) and
not isinstance(element, (bool, np.bool_, datetime, timedelta,
np.datetime64, np.timedelta64)))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_format=None,
decimal='.', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from pandas.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type,
(np.floating, np.integer, np.complexfloating))
return (isinstance(element,
(float, int, complex, np.float_, np.int_)) and
                not isinstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class DatetimeLikeBlockMixin(object):
@property
def _na_value(self):
return tslib.NaT
@property
def fill_value(self):
return tslib.iNaT
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def get_values(self, dtype=None):
"""
return object dtype as boxed values, such as Timestamps/Timedelta
"""
if com.is_object_dtype(dtype):
return lib.map_infer(self.values.ravel(),
self._box_func).reshape(self.values.shape)
return self.values
class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def _box_func(self):
return lambda x: tslib.Timedelta(x, unit='ns')
def fillna(self, value, **kwargs):
# allow filling with integers to be
# interpreted as seconds
if not isinstance(value, np.timedelta64) and com.is_integer(value):
value = Timedelta(value, unit='s')
return super(TimeDeltaBlock, self).fillna(value, **kwargs)
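    # Per the branch above, filling a timedelta64 block with a bare integer is
    # read as seconds, e.g. fillna(1) fills with Timedelta('0 days 00:00:01')
    # rather than one nanosecond.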
def _try_coerce_args(self, values, other):
"""
Coerce values and other to int64, with null values converted to
iNaT. values is always ndarray-like, other may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, Timedelta):
other_mask = isnull(other)
other = other.value
elif isinstance(other, np.timedelta64):
other_mask = isnull(other)
other = other.view('i8')
elif isinstance(other, timedelta):
other = Timedelta(other).value
elif isinstance(other, np.ndarray):
other_mask = isnull(other)
other = other.astype('i8', copy=False).view('i8')
else:
# scalar
other = Timedelta(other)
other_mask = isnull(other)
other = other.value
return values, values_mask, other, other_mask
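    # The i8 view used above is the nanosecond integer representation, e.g.
    # Timedelta('1 day').value == 86400000000000, and null-like scalars are
    # coerced to tslib.iNaT with other_mask flagged accordingly.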
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if isinstance(result, np.ndarray):
mask = isnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('m8[ns]')
result[mask] = tslib.iNaT
elif isinstance(result, (np.integer, np.float)):
result = self._box_func(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None,
**kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = isnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).ravel()
# FIXME:
# should use the formats.format.Timedelta64Formatter here
# to figure what format to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
for val in values.ravel()[imask]],
dtype=object)
return rvalues
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return isinstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, mgr=None):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
regex=regex, mgr=mgr)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False, placement=None,
**kwargs):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath,
placement=placement, **kwargs)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.ravel())
# TODO: Refactor when convert_objects is removed since there will be 1 path
def convert(self, *args, **kwargs):
""" attempt to coerce any object types to better types return a copy of
the block (if copy = True) by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
if args:
raise NotImplementedError
by_item = True if 'by_item' not in kwargs else kwargs['by_item']
new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']
new_style = False
for kw in new_inputs:
new_style |= kw in kwargs
if new_style:
fn = convert._soft_convert_objects
fn_inputs = new_inputs
else:
fn = convert._possibly_convert_objects
fn_inputs = ['convert_dates', 'convert_numeric',
'convert_timedeltas']
fn_inputs += ['copy']
fn_kwargs = {}
for key in fn_inputs:
if key in kwargs:
fn_kwargs[key] = kwargs[key]
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.iget(i)
values = fn(values.ravel(), **fn_kwargs).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values, ndim=self.ndim, placement=[rl])
blocks.append(newb)
else:
values = fn(
self.values.ravel(), **fn_kwargs).reshape(self.values.shape)
blocks.append(make_block(values, ndim=self.ndim,
placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = len(self.items)
self.values = np.empty(tuple(new_shape), dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
return _extend_blocks([b.convert(datetime=True, numeric=False)
for b in blocks])
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or
is_extension_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
result_blocks = []
blocks = [self]
if not either_list and com.is_re(to_replace):
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, regex=True,
convert=convert, mgr=mgr)
elif not (either_list or regex):
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
elif to_rep_is_list and regex:
for to_rep in to_replace:
result_blocks = []
for b in blocks:
result = b._replace_single(to_rep, value, inplace=inplace,
filter=filter, regex=regex,
convert=convert, mgr=mgr)
result_blocks = _extend_blocks(result, result_blocks)
blocks = result_blocks
return result_blocks
return self._replace_single(to_replace, value, inplace=inplace,
filter=filter, convert=convert,
regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
regex=False, convert=True, mgr=None):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replace and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replace = regex
regex = regex_re or to_rep_re
# try to get the pattern attribute (compiled re) or it's a string
try:
pattern = to_replace.pattern
except AttributeError:
pattern = to_replace
# if the pattern is not empty and to_replace is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replace)
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
return super(ObjectBlock, self).replace(to_replace, value,
inplace=inplace,
filter=filter, regex=regex,
mgr=mgr)
new_values = self.values if inplace else self.values.copy()
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isnull(value) or not isinstance(value, compat.string_types):
def re_replacer(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
# or null if it's null it gets returned
def re_replacer(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacer, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.isin(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
# convert
block = self.make_block(new_values)
if convert:
block = block.convert(by_item=True, numeric=False)
return block
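    # Illustrative sketch (not part of the original source): with regex=True
    # and a string replacement value, _replace_single compiles ``to_replace``
    # and vectorises ``rx.sub`` over the object values; e.g. on hypothetical
    # data, replacing r'^ab' with 'X' turns
    #   np.array(['abc', 'xyz'], dtype=object)  ->  ['Xc', 'xyz']
    # Non-string cells pass through unchanged because re.sub raises TypeError
    # on them, which re_replacer catches.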
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_verify_integrity = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement, fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True,
placement=placement, **kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
return self.copy() if copy else self
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an
array
"""
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
# GH12564: CategoricalBlock is 1-dim only
# while returned results could be any dim
if ((not com.is_categorical_dtype(result)) and
isinstance(result, np.ndarray)):
result = _block_shape(result, ndim=self.ndim)
return result
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = self._try_coerce_result(values.fillna(value=value,
limit=limit))
return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = self.values if inplace else self.values.copy()
return self.make_block_same_class(
values=values.fillna(fill_value=fill_value, method=method,
limit=limit),
placement=self.mgr_locs)
def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
# axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
        # if it's REALLY axis 0, then this will be a reindex and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
        raise an exception on failure if raise_on_error == True
"""
if self.is_categorical_astype(dtype):
values = self.values
else:
values = np.asarray(self.values).astype(dtype, copy=False)
if copy:
values = values.copy()
return self.make_block(values)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = isnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1, len(values))
class DatetimeBlock(DatetimeLikeBlockMixin, Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement, fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values, fastpath=True,
placement=placement, **kwargs)
def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
        raise an exception on failure if raise_on_error == True
"""
# if we are passed a datetime64[ns, tz]
if com.is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
values = self.values
if getattr(values, 'tz', None) is None:
values = DatetimeIndex(values).tz_localize('UTC')
values = values.tz_convert(dtype.tz)
return self.make_block(values)
# delegate
return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or isinstance(element, datetime) or
isnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_coerce_args(self, values, other):
"""
Coerce values and other to dtype 'i8'. NaN and NaT convert to
the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.view('i8')
other_mask = False
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
other = self._box_func(other)
if getattr(other, 'tz') is not None:
raise TypeError("cannot coerce a Timestamp with a tz on a "
"naive Block")
other_mask = isnull(other)
other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
try:
other = np.asarray(other)
other_mask = isnull(other)
other = other.astype('i8', copy=False).view('i8')
except ValueError:
# coercion issues
# let higher levels handle
raise TypeError
return values, values_mask, other, other_mask
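    # Illustrative sketch (not part of the original source): both NaN and NaT
    # map to tslib.iNaT (the smallest int64) with their mask entry set, e.g.
    #   values = np.array(['2016-01-01', 'NaT'], dtype='M8[ns]')
    #   values.view('i8')  -> [1451606400000000000, iNaT]
    #   isnull(values)     -> [False, True]
    # _try_coerce_result below casts the i8 data back to 'M8[ns]', so iNaT
    # round-trips to NaT.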
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = self._box_func(result)
return result
@property
def _box_func(self):
return tslib.Timestamp
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[..., slicer]
from pandas.formats.format import _get_format_datetime64_from_values
format = _get_format_datetime64_from_values(values, date_format)
result = tslib.format_array_from_datetime(
values.view('i8').ravel(), tz=getattr(self.values, 'tz', None),
format=format, na_rep=na_rep).reshape(values.shape)
return np.atleast_2d(result)
def should_store(self, value):
return (issubclass(value.dtype.type, np.datetime64) and
not is_datetimetz(value))
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
# Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock):
""" implement a datetime64 block with a tz attribute """
__slots__ = ()
_holder = DatetimeIndex
is_datetimetz = True
def __init__(self, values, placement, ndim=2, **kwargs):
if not isinstance(values, self._holder):
values = self._holder(values)
dtype = kwargs.pop('dtype', None)
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = DatetimeTZDtype.construct_from_string(dtype)
values = values.tz_localize('UTC').tz_convert(dtype.tz)
if values.tz is None:
raise ValueError("cannot create a DatetimeTZBlock without a tz")
super(DatetimeTZBlock, self).__init__(values, placement=placement,
ndim=ndim, **kwargs)
def copy(self, deep=True, mgr=None):
""" copy constructor """
values = self.values
if deep:
values = values.copy(deep=True)
return self.make_block_same_class(values)
def external_values(self):
""" we internally represent the data as a DatetimeIndex, but for
external compat with ndarray, export as a ndarray of Timestamps
"""
return self.values.astype('datetime64[ns]').values
def get_values(self, dtype=None):
# return object dtype as Timestamps with the zones
if com.is_object_dtype(dtype):
f = lambda x: lib.Timestamp(x, tz=self.values.tz)
return lib.map_infer(
self.values.ravel(), f).reshape(self.values.shape)
return self.values
def to_object_block(self, mgr):
"""
return myself as an object block
        Since we keep the DTI as a 1-d object, the result differs
        depending on the BlockManager's ndim
"""
values = self.get_values(dtype=object)
kwargs = {}
if mgr.ndim > 1:
values = _block_shape(values, ndim=mgr.ndim)
kwargs['ndim'] = mgr.ndim
kwargs['placement'] = [0]
return self.make_block(values, klass=ObjectBlock, **kwargs)
def replace(self, *args, **kwargs):
# if we are forced to ObjectBlock, then don't coerce (to UTC)
kwargs['convert'] = False
return super(DatetimeTZBlock, self).replace(*args, **kwargs)
def _slice(self, slicer):
""" return a slice of my values """
if isinstance(slicer, tuple):
col, loc = slicer
if not is_null_slice(col) and col != 0:
raise IndexError("{0} only contains one item".format(self))
return self.values[loc]
return self.values[slicer]
def _try_coerce_args(self, values, other):
"""
localize and return i8 for the values
Parameters
----------
values : ndarray-like
other : ndarray-like or scalar
Returns
-------
base-type values, values mask, base-type other, other mask
"""
values_mask = isnull(values)
values = values.tz_localize(None).asi8
other_mask = False
if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isnull(other)
if isinstance(other, bool):
raise TypeError
elif is_null_datelike_scalar(other):
other = tslib.iNaT
other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
other_mask = isnull(other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
tz = getattr(other, 'tz', None)
# test we can have an equal time zone
if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
other_mask = isnull(other)
other = other.tz_localize(None).value
return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if isinstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.astype('M8[ns]')
elif isinstance(result, (np.integer, np.float, np.datetime64)):
result = lib.Timestamp(result).tz_localize(self.values.tz)
if isinstance(result, np.ndarray):
result = self._holder(result).tz_localize(self.values.tz)
return result
@property
def _box_func(self):
return lambda x: tslib.Timestamp(x, tz=self.dtype.tz)
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
# think about moving this to the DatetimeIndex. This is a non-freq
# (number of periods) shift ###
N = len(self)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
# move to UTC & take
new_values = self.values.tz_localize(None).asi8.take(indexer)
if periods > 0:
new_values[:periods] = tslib.iNaT
else:
new_values[periods:] = tslib.iNaT
new_values = DatetimeIndex(new_values, tz=self.values.tz)
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
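    # Illustrative sketch (not part of the original source): shifting a block
    # holding ['2016-01-01', '2016-01-02', '2016-01-03'] (tz='UTC') by
    # periods=1 yields [NaT, '2016-01-01', '2016-01-02'], with the original tz
    # reattached via DatetimeIndex(new_values, tz=self.values.tz).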
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_box_to_block_values = False
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (len(self.mgr_locs), self.sp_index.length)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
# return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
def to_dense(self):
return self.values.to_dense().view()
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
copy=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __len__(self):
try:
return self.sp_index.length
except:
return 0
def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement, sparse_index=None,
kind=None, dtype=None, fill_value=None,
copy=False, fastpath=True, **kwargs):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None and not isinstance(values, SparseArray):
fill_value = self.values.fill_value
# if not isinstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return self.make_block(np.empty(values.shape, dtype=dtype),
placement,
fastpath=True)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, copy=copy)
return self.make_block(new_values, fastpath=fastpath,
placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False, limit=None,
fill_value=None, **kwargs):
values = missing.interpolate_2d(self.values.to_dense(), method, axis,
limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillna(self, value, limit=None, inplace=False, downcast=None,
mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
values = values.fillna(value, downcast=downcast)
return [self.make_block_same_class(values=values,
placement=self.mgr_locs)]
def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values,
placement=self.mgr_locs)]
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindex(self, new_index):
""" sparse reindex and return a new block
current reindex only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindex(
values.sp_values.astype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None, dtype=None,
fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if isinstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
if hasattr(values, 'tz'):
klass = DatetimeTZBlock
else:
klass = DatetimeBlock
elif is_datetimetz(values):
klass = DatetimeTZBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
elif klass is DatetimeTZBlock and not is_datetimetz(values):
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement, dtype=dtype)
return klass(values, ndim=ndim, fastpath=fastpath, placement=placement)
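# Illustrative sketch (not part of the original source, hypothetical arrays):
# the dtype-based dispatch above means, for example,
#   make_block(np.array([[1.0, 2.0]]), placement=[0])                -> FloatBlock
#   make_block(np.array([['x', 'y']], dtype=object), placement=[0])  -> ObjectBlock
# while tz-aware datetime values are routed to DatetimeTZBlock.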
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, Panel, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if len(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple "
"items")
else:
if self.ndim != block.ndim:
raise AssertionError('Number of Block dimensions (%d) '
'must equal number of axes (%d)' %
(block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [_ensure_index([])] + [_ensure_index(a)
for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self):
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' %
(old_len, new_len))
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True):
"""
        Rename one of the axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.rename_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.rename_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, len(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
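        # Illustrative sketch (not part of the original source): for items
        # ['a', 'b', 'c'] stored as blocks[0] (holding 'a' and 'c') and
        # blocks[1] (holding 'b'), the rebuilt maps are
        #   _blknos  = [0, 1, 0]   # which block owns each item
        #   _blklocs = [0, 0, 1]   # the item's position inside that block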
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4 and
'0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self):
return len(self.items)
def __unicode__(self):
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.format(
len(self.items), tot_items))
def apply(self, f, axes=None, filter=None, do_integrity_check=False,
consolidate=True, raw=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
raw: boolean, default False. Return the raw returned results
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == 'where':
align_copy = True
if kwargs.get('align', True):
align_keys = ['other', 'cond']
else:
align_keys = ['cond']
elif f == 'putmask':
align_copy = False
if kwargs.get('align', True):
align_keys = ['new', 'mask']
else:
align_keys = ['mask']
elif f == 'eval':
align_copy = False
align_keys = ['other']
elif f == 'fillna':
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k])
for k in align_keys
if hasattr(kwargs[k], 'reindex_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = getattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if raw:
if self._is_single_block:
return result_blocks[0]
return result_blocks
elif len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
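    # Summary comment (assumption, not part of the original source): the
    # one-liners below all delegate to apply(); e.g. mgr.shift(periods=1)
    # calls blk.shift(periods=1, mgr=mgr) on every block and assembles the
    # returned blocks into a new BlockManager.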
def isnull(self, **kwargs):
return self.apply('apply', **kwargs)
def where(self, **kwargs):
return self.apply('where', **kwargs)
def eval(self, **kwargs):
return self.apply('eval', **kwargs)
def quantile(self, **kwargs):
return self.apply('quantile', raw=True, **kwargs)
def setitem(self, **kwargs):
return self.apply('setitem', **kwargs)
def putmask(self, **kwargs):
return self.apply('putmask', **kwargs)
def diff(self, **kwargs):
return self.apply('diff', **kwargs)
def interpolate(self, **kwargs):
return self.apply('interpolate', **kwargs)
def shift(self, **kwargs):
return self.apply('shift', **kwargs)
def fillna(self, **kwargs):
return self.apply('fillna', **kwargs)
def downcast(self, **kwargs):
return self.apply('downcast', **kwargs)
def astype(self, dtype, **kwargs):
return self.apply('astype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply('convert', **kwargs)
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False,
mgr=None):
""" do a list replace """
if mgr is None:
mgr = self
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
def comp(s):
if isnull(s):
return isnull(values)
return _possibly_compare(values, getattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
            # it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace, regex=regex,
mgr=mgr)
new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.any():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
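    # Illustrative sketch (not part of the original source): because the masks
    # are computed once against the original matrix, a numeric cell that was 1
    # and is rewritten to 2 by replace_list(src_list=[1, 2], dest_list=[2, 3])
    # is not rewritten again by the (2 -> 3) pass; the 2-mask only marks cells
    # that were 2 before any replacement.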
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
        Return True if the blocks are consolidated
        (no two blocks share the same ftype)
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array
for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array,
axis=0, allow_fill=False)
new_blocks.append(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == 'all':
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply('copy', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].get_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workaround for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError('Some items were not contained in blocks')
return result
def xs(self, key, axis=1, copy=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d' %
axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
# we must copy here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.append(newb)
elif len(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(values=vals,
placement=block.mgr_locs,
klass=block.__class__,
fastpath=True, )]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
        get a cross section for a given location in the
        items; handle duplicates
        return the result, which *could* be a view in the case of a
        single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isnull(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isnull(self.items)]
# allow a single nan location indexer
if not lib.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc, fastpath=fastpath)
else:
if isnull(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0,
allow_dups=True)
def iget(self, i, fastpath=True):
"""
        Return the data as a SingleBlockManager if fastpath=True and possible.
        Otherwise return as an ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
if not fastpath or not block._box_to_block_values or values.ndim != 1:
return values
        # fastpath shortcut for selecting a single-dim from a 2-dim BM
return SingleBlockManager(
[block.make_block_same_class(values,
placement=slice(0, len(values)),
ndim=1, fastpath=True)],
self.axes[1])
def get_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.get_loc(x) for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
values = blk.values
# FIXME: this may return non-upcasted types?
if values.ndim == 1:
return values[full_loc[1]]
full_loc[0] = self._blklocs[full_loc[0]]
return values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_type(value)
        # categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs), check=check)
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
len(removed_blknos))
self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0,
allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.copy(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
len(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(values=value_getitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim,
placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, copy=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(new_index, method=method,
limit=limit)
return self.reindex_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, copy=copy)
def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
allow_dups=False, copy=True):
"""
Parameters
----------
new_axis : Index
        indexer : ndarray of int64 or None
            pandas-indexer with -1's only
        axis : int
        fill_value : object
        allow_dups : bool
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer,
fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value, )
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(self._make_na_block(placement=mgr_locs,
fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(blklocs[mgr_locs.indexer],
axis=0, new_mgr_locs=mgr_locs,
fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True)
def merge(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to merge managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concat_indexes([l, r])
new_blocks = [blk.copy(deep=False) for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.copy(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.append(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError('Number of dimensions must agree '
'got %d and %d' % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.tolist())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(block.equals(oblock)
for block, oblock in zip(self_blocks, other_blocks))
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError("cannot create SingleBlockManager with more "
"than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if isinstance(block, list):
# empty block
if len(block) == 0:
block = [np.array([])]
elif len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if isinstance(block, list):
# provide consolidation to the interleaved_dtype
if len(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.astype(dtype) for b in block]
block = _consolidate(block)
if len(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not isinstance(block, Block):
block = make_block(block, placement=slice(0, len(axis)), ndim=1,
fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, copy=True):
# if we are the same and don't copy, just return
if self.index.equals(new_axis):
if copy:
return self.copy(deep=True)
else:
return self
values = self._block.get_values()
if indexer is None:
indexer = self.items.get_indexer_for(new_axis)
if fill_value is None:
fill_value = np.nan
new_values = algos.take_1d(values, indexer, fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = missing.interpolate_2d(new_values,
method=method,
limit=limit,
fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, copy=copy,
placement=slice(0, len(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.apply('convert', **kwargs)
@property
def dtype(self):
return self._block.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_ftype_counts(self):
return {self.ftype: 1}
def get_dtypes(self):
return np.array([self._block.dtype])
def get_ftypes(self):
return np.array([self._block.ftype])
def external_values(self):
return self._block.external_values()
def internal_values(self):
return self._block.internal_values()
def get_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), copy=False)
@property
def asobject(self):
"""
        return an object dtype array. datetime/timedelta like values are boxed
to Timestamp/Timedelta instances.
"""
return self._block.get_values(dtype=object)
@property
def itemsize(self):
return self._block.values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
implied = tuple(map(int, [len(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
if block_shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [make_block(values=blocks[0],
placement=slice(0, len(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [getattr(b, 'values', b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
datetime_tz_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if is_sparse(v):
sparse_items.append((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.append((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.append((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if is_datetimetz(v):
datetime_tz_items.append((i, k, v))
else:
datetime_items.append((i, k, v))
elif is_datetimetz(v):
datetime_tz_items.append((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2**63 - 1).any():
object_items.append((i, k, v))
continue
int_items.append((i, k, v))
elif v.dtype == np.bool_:
bool_items.append((i, k, v))
elif is_categorical(v):
cat_items.append((i, k, v))
else:
object_items.append((i, k, v))
blocks = []
if len(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if len(complex_items):
complex_blocks = _multi_blockify(complex_items)
blocks.extend(complex_blocks)
if len(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if len(datetime_items):
datetime_blocks = _simple_blockify(datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(datetime_tz_items):
dttz_blocks = [make_block(array,
klass=DatetimeTZBlock,
fastpath=True,
placement=[i], )
for i, _, array in datetime_tz_items]
blocks.extend(dttz_blocks)
if len(bool_items):
bool_blocks = _simple_blockify(bool_items, np.bool_)
blocks.extend(bool_blocks)
if len(object_items) > 0:
object_blocks = _simple_blockify(object_items, np.object_)
blocks.extend(object_blocks)
if len(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if len(cat_items) > 0:
cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True,
placement=[i])
for i, _, array in cat_items]
blocks.extend(cat_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
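# Illustrative sketch (hypothetical inputs; assumes the numpy and Index imports
# above): arrays of mixed dtypes are split into per-dtype buckets, e.g.
#   arrays = [np.arange(3), np.arange(3.0), np.array(['x', 'y', 'z'], dtype=object)]
#   names = ['i', 'f', 'o']; axes = [Index(names), Index(range(3))]
#   form_blocks(arrays, names, axes)  # -> one int block, one float block, one object block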
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
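# Illustrative sketch (hypothetical float items in (placement, name, array) form):
#   _simple_blockify([(0, 'a', np.array([1., 2.])),
#                     (1, 'b', np.array([3., 4.]))], np.float64)
#   # -> a list containing a single 2x2 float64 block placed at items 0 and 1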
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return len(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
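# Illustrative sketch (hypothetical inputs; a Series and an ndarray are both
# accepted thanks to _asarray_compat / _shape_compat):
#   _stack_arrays([(0, 'a', Series([1., 2.])), (1, 'b', np.array([3., 4.]))],
#                 np.float64)
#   # -> (array([[1., 2.], [3., 4.]]), (0, 1))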
def _interleaved_dtype(blocks):
if not len(blocks):
return None
counts = defaultdict(list)
for x in blocks:
counts[type(x)].append(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accomodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = len(counts[IntBlock]) > 0
have_bool = len(counts[BoolBlock]) > 0
have_object = len(counts[ObjectBlock]) > 0
have_float = len(counts[FloatBlock]) > 0
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_dt64_tz = len(counts[DatetimeTZBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
have_cat = len(counts[CategoricalBlock]) > 0
# TODO: have_sparse is not used
have_sparse = len(counts[SparseBlock]) > 0 # noqa
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_dt64_tz or have_td64 or have_cat
if (have_object or
(have_bool and
(have_numeric or have_dt64 or have_dt64_tz or have_td64)) or
(have_numeric and has_non_numeric) or have_cat or have_dt64 or
have_dt64_tz or have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if len(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
# return one size larger on the itemsize if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
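# Worked example (hypothetical block mix): an int8 block combined with a uint32
# block gives lcd = uint32 and kinds = {'i', 'u'}, so the result is promoted to
# int64, the next larger signed type that can hold both:
#   _interleaved_dtype([int8_block, uint32_block])  # -> dtype('int64')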
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
if len(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if len(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_merge_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values, fastpath=True, placement=new_mgr_locs)
# no merge
return blocks
def _extend_blocks(result, blocks=None):
""" return a new extended blocks, givin the result """
if blocks is None:
blocks = []
if isinstance(result, list):
for r in result:
if isinstance(r, list):
blocks.extend(r)
else:
blocks.append(r)
elif isinstance(result, BlockManager):
blocks.extend(result.blocks)
else:
blocks.append(result)
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1, ) + shape))
return values
def _vstack(to_stack, dtype):
# work around NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
# numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
result = False
# numpy deprecation warning if comparing numeric vs string-like
elif is_numeric_v_string_like(a, b):
result = False
else:
result = op(a, b)
if lib.isscalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return result
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from pandas.core.internals import make_block
panel_shape = (len(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and minor
# labels, for converting to panel format.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(len(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
def _get_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.get_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_rename = left.intersection(right)
if len(to_rename) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenamer),
_transform_index(right, rrenamer))
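# Illustrative sketch (hypothetical indexes):
#   items_overlap_with_suffix(Index(['a', 'b']), '_x', Index(['b', 'c']), '_y')
#   # -> (Index(['a', 'b_x']), Index(['b_y', 'c']))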
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, updated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the length of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * len(m))
elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndmin=1), len(m))
# see if we are only masking values that, if put in place,
# will work in the current dtype
try:
nn = n[m]
# make sure that we have a nullable type
# if we have nulls
if not _is_na_compat(v, nn[0]):
raise ValueError
nn_at = nn.astype(v.dtype)
# avoid invalid dtype comparisons
if not is_numeric_v_string_like(nn, nn_at):
comp = (nn == nn_at)
if is_list_like(comp) and comp.all():
nv = v.copy()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.astype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
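# Illustrative sketch (hypothetical values): the dtype is preserved when the new
# values fit, and upcast otherwise:
#   _putmask_smart(np.array([1, 2, 3]), np.array([True, False, False]), 10)
#   # -> array([10, 2, 3]), integer dtype preserved
#   _putmask_smart(np.array([1, 2, 3]), np.array([True, False, False]), 1.5)
#   # -> array([1.5, 2., 3.]), upcast to float64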
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plan = combine_concat_plans(
[get_mgr_concatenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers], concat_axis)
blocks = [make_block(concatenate_join_units(join_units, concat_axis,
copy=copy),
placement=placement)
for placement, join_units in concat_plan]
return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = defaultdict(list)
null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif com.is_datetimetz(dtype):
upcast_cls = 'datetimetz'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, in which case the same upcasting rules must be
# applied to the null upcast classes.
if unit.is_null:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetimetz' in upcast_classes:
dtype = upcast_classes['datetimetz']
return dtype[0], tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy and concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
class JoinUnit(object):
def __init__(self, block, shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self):
return '%s(%r, %s)' % (self.__class__.__name__, self.block,
self.indexers)
@cache_readonly
def needs_filling(self):
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return com._get_dtype(com._maybe_promote(self.block.dtype,
self.block.fill_value)[0])
return self._dtype
@cache_readonly
def is_null(self):
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check only a small fraction of values to see if
# a block is NOT null; chunks should help in such cases. The 1000-value
# chunk size was chosen rather arbitrarily.
values = self.block.values
if self.block.is_categorical:
values_flat = values.categories
elif self.block.is_sparse:
# fill_value is not NaN and there are holes
if not values._null_fill_value and values.sp_index.ngaps > 0:
return False
values_flat = values.ravel(order='K')
else:
values_flat = values.ravel(order='K')
total_len = values_flat.shape[0]
chunk_len = max(total_len // 40, 1000)
for i in range(0, total_len, chunk_len):
if not isnull(values_flat[i:i + chunk_len]).all():
return False
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_null:
if getattr(self.block, 'is_object', False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order='K')
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, 'is_datetimetz', False):
pass
elif getattr(self.block, 'is_categorical', False):
pass
elif getattr(self.block, 'is_sparse', False):
pass
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if not self.indexers:
if not self.block._can_consolidate:
# preserve these for validation in _concat_compat
return self.block.values
if self.block.is_bool:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.get_values()
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax,
fill_value=fill_value)
return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
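# Illustrative sketch:
#   _fast_count_smallints(np.array([1, 1, 3]))
#   # -> array([[1, 2], [3, 1]]), i.e. value 1 occurs twice and value 3 once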
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
length)
elif (isinstance(slice_or_indexer, np.ndarray) and
slice_or_indexer.dtype == np.bool_):
return 'mask', slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return 'fancy', indexer, len(indexer)
| mit |
EhudTsivion/QCkit | thermalDesorption/mdSim.py | 1 | 8639 | import datetime
import random
import logging as log
from matplotlib import pyplot as plt
import numpy as np
from QCkit.atom import Atom
from QCkit.molecule import Molecule
from QCkit import physical_constants
from QCkit.thermalDesorption.mdjob import MDjob
from QCkit.thermalDesorption.mdScratchParser import MdScratchParser
class MD:
"""
A class for simulation of temperature programmed desorption experiments
using molecular dynamics
"""
def __init__(self,
molecule, # the molecule object, contains all the information about the molecule
basis, # basis set to use
exchange, # exchange to use (such as B3LYP etc.)
temperature, # target temperature simulation
aimd_num_steps, # number of MD steps
thermostat_timescale, # friction of system with the thermostat head-bath
# time step of the MD simulation, in atomic units = 0.0242 fs, default is 0.0242 fs
aimd_step_duration=10,
thermostat="langevin", # type of thermostat
threads=None, # number of openmp threads to use. If none then is OMP_NUM_THREADS
job_name=None,
velocities=None,
restart=False,
extra_rems=None): # name of the job. appears in all related output files.
self.temperature = temperature
self.molecule = molecule
self.time_step = aimd_step_duration
self.aimd_steps = aimd_num_steps
self.basis = basis
self.threads = threads
self.exchange = exchange
self.thermostat = thermostat
self.thermostat_timescale = thermostat_timescale
self.job_name = job_name
self.velocities = velocities
self.extra_rems = extra_rems
if not job_name:
self.job_name = "mdrun-QChem" + datetime.datetime.now().strftime('-%Y-%m-%d-') \
+ str(random.randint(1000000, 9999999))
else:
if not restart and job_name != 'test':
# always append a random number, because several
# jobs with same name can run in parallel
self.job_name = '{}-{}'.format(job_name, str(random.randint(1000000, 9999999)))
else:
# all output is appended
self.job_name = job_name
if not type(self.time_step) == int:
print('non-integer simulation time step detected, attempting to round')
self.time_step = round(self.time_step)
log.basicConfig(filename="{}.log".format(self.job_name),
filemode='a',
level='INFO',
format='')
if restart:
log.info("\n\n{:*^30}".format("RESTART"))
self.run(restart)
log.info("******** Simulation ended")
def run(self, restart=False):
print('called run')
rems = {"aimd_thermostat": self.thermostat,
"aimd_time_step": self.time_step,
"aimd_temp": self.temperature,
"aimd_steps": self.aimd_steps,
"aimd_init_veloc": "thermal",
"aimd_print": "1",
"max_scf_cycles": "200",
"aimd_langevin_timescale": self.thermostat_timescale}
# if extra_rems are specified
# add them to these rems
if self.extra_rems:
rems.update(self.extra_rems)
# I think this MDjob thing is
# probably unnecessary
job = MDjob(molecule=self.molecule,
job_type='aimd',
basis=self.basis,
threads=self.threads,
exchange=self.exchange,
molden_format='False',
job_name=self.job_name,
rems=rems)
log.info('\n\n{:*^30}'.format('new MD simulation'))
log.info('\ncomputational details:')
log.info('basis: {}, exchange: {}'.format(self.basis, self.exchange))
log.info('Target temperature {}'.format(self.temperature))
log.info('using {} thermostat'.format(job.rems["aimd_thermostat"]))
log.info('thermostat frequency: {}'.format(job.rems['aimd_langevin_timescale']))
if restart:
job.set_velocities(self.velocities)
job.rm_rem('aimd_init_veloc')
first_run = False
else:
first_run = True
job.run()
if not job.failed:
log.info('Q-Chem finished MD run successfully')
else:
log.info('Q-Chem job failed')
# create a new scratch parser to parse the scratch
# from $QCSCRATCH/AIMD
scr_parser = MdScratchParser(self.job_name)
# update position for next AIMD run
job.molecule.positions = scr_parser.get_positions() * physical_constants.angstrom_to_bohr
self.scratch_generator(job.temperatures_list,
job.trajectory,
scr_parser.get_velocities(),
self.temperature)
# plot interesting quantities
tandv = scr_parser.get_temp_and_potential()
plt.title('job: {}'.format(self.job_name))
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
ax0.plot(tandv['time'], tandv['temperature'])
ax1.plot(tandv['time'], tandv['potentialE'])
ax0.set_title('temperature')
ax1.set_title('potential energy')
#
# plt.plot(x_axis, job.potential_energy_list)
#
# plt.savefig(self.job_name + 'temperature.png')
#
# plt.
#
plt.savefig(self.job_name + '_temperature.png')
def scratch_generator(self, inst_temperature, trj_data, velocities, temperature):
# append TRJ data to file
with open(self.job_name + ".trj", 'a') as f:
# counts the steps of the simulation
step_counter = 0
# counts the lines of xyz data which includes:
# first line is number of atoms
# second line comment
# rest of the lines xyz of atoms
# total of Natoms + 2 lines ( +1 if you count from 0)
lines_counter = 0
for line in trj_data.splitlines():
if step_counter >= 1:
if lines_counter == 0:
lines_counter += 1
f.write(line + '\n')
elif lines_counter == 1:
lines_counter += 1
f.write('T {} K, time {:0.2f} fs\n'.format(inst_temperature[step_counter],
step_counter *
self.time_step /
physical_constants.atomic_unit_of_time_to_femtosec))
elif lines_counter % (self.molecule.atom_count + 1) == 0:
lines_counter = 0
step_counter += 1
f.write(line + '\n')
else:
lines_counter += 1
f.write(line + '\n')
# the first xyz data is just a starting
# single-point calculation which is of no interest
# and is therefore discarded
elif step_counter == 0 and lines_counter < self.molecule.atom_count + 1:
lines_counter += 1
elif step_counter == 0 and lines_counter == self.molecule.atom_count + 1:
lines_counter = 0
step_counter += 1
# generate file with temperatures
with open(self.job_name + ".tempra", 'a') as f:
f.write('Target Temperature {} K\n'.format(self.temperature))
f.write('{}\n'.format(inst_temperature))
# since we're interested only in the last couple of lines
# the data is now parsed into lines
velocities = velocities.splitlines()
trj_data = trj_data.splitlines()
with open(self.job_name + ".last_pos_vel", 'a') as f:
f.write('Target Temperature {} K\n'.format(self.temperature))
f.write('Ended Q-Chem aimd run with the following positions and velocities:\n')
for i in reversed(range(1, self.molecule.atom_count + 1)):
f.write('{} {}\n'.format(trj_data[-1 * i], velocities[-1 * i]))
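# Illustrative usage sketch (hypothetical arguments; assumes a prepared QCkit
# Molecule object `mol` and a working Q-Chem installation):
#   md = MD(molecule=mol, basis='6-31g*', exchange='b3lyp', temperature=300,
#           aimd_num_steps=1000, thermostat_timescale=120, job_name='tpd_run')
#   # results are appended to the job's .log, .trj, .tempra and .last_pos_vel files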
if __name__ == "__main__":
pass
| lgpl-3.0 |
maziarraissi/ParametricGP-in-Python | PGPs_tensorflow/PGP/parametric_GP.py | 1 | 7144 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maziar Raissi
"""
import numpy as np
import tensorflow as tf
from sklearn.cluster import KMeans
from Utilities import kernel, kernel_tf, fetch_minibatch
import timeit
class PGP:
def __init__(self, X, y, M=10, max_iter = 2000, N_batch = 1,
monitor_likelihood = 10, lrate = 1e-3):
(N,D) = X.shape
# kmeans on a subset of data
N_subset = min(N, 10000)
idx = np.random.choice(N, N_subset, replace=False)
kmeans = KMeans(n_clusters=M, random_state=0).fit(X[idx,:])
Z = kmeans.cluster_centers_
hyp = np.log(np.ones(D+1))
logsigma_n = np.array([-4.0])
hyp = np.concatenate([hyp, logsigma_n])
m = np.zeros((M,1))
S = kernel(Z,Z,hyp[:-1])
self.X = X
self.y = y
self.M = M
self.Z = tf.Variable(Z,dtype=tf.float64,trainable=False)
self.K_u_inv = tf.Variable(np.eye(M),dtype=tf.float64,trainable=False)
self.m = tf.Variable(m,dtype=tf.float64,trainable=False)
self.S = tf.Variable(S,dtype=tf.float64,trainable=False)
self.nlml = tf.Variable(0.0, dtype=tf.float64, trainable=False)
self.hyp = hyp
self.max_iter = max_iter
self.N_batch = N_batch
self.monitor_likelihood = monitor_likelihood
self.jitter = 1e-8
self.jitter_cov = 1e-8
self.lrate = lrate
self.optimizer = tf.train.AdamOptimizer(self.lrate)
# Tensor Flow Session
# self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
self.sess = tf.Session()
def train(self):
print("Total number of parameters: %d" % (self.hyp.shape[0]))
X_tf = tf.placeholder(tf.float64)
y_tf = tf.placeholder(tf.float64)
hyp_tf = tf.Variable(self.hyp, dtype=tf.float64)
train = self.likelihood(hyp_tf, X_tf, y_tf)
init = tf.global_variables_initializer()
self.sess.run(init)
start_time = timeit.default_timer()
for i in range(1,self.max_iter+1):
# Fetch minibatch
X_batch, y_batch = fetch_minibatch(self.X,self.y,self.N_batch)
self.sess.run(train, {X_tf:X_batch, y_tf:y_batch})
if i % self.monitor_likelihood == 0:
elapsed = timeit.default_timer() - start_time
nlml = self.sess.run(self.nlml)
print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
start_time = timeit.default_timer()
self.hyp = self.sess.run(hyp_tf)
def likelihood(self, hyp, X_batch, y_batch, monitor=False):
M = self.M
Z = self.Z
m = self.m
S = self.S
jitter = self.jitter
jitter_cov = self.jitter_cov
N = tf.shape(X_batch)[0]
logsigma_n = hyp[-1]
sigma_n = tf.exp(logsigma_n)
# Compute K_u_inv
K_u = kernel_tf(Z, Z, hyp[:-1])
L = tf.cholesky(K_u + np.eye(M)*jitter_cov)
K_u_inv = tf.matrix_triangular_solve(tf.transpose(L), tf.matrix_triangular_solve(L, np.eye(M), lower=True), lower=False)
K_u_inv_op = self.K_u_inv.assign(K_u_inv)
# Compute mu
psi = kernel_tf(Z, X_batch, hyp[:-1])
K_u_inv_m = tf.matmul(K_u_inv, m)
MU = tf.matmul(tf.transpose(psi), K_u_inv_m)
# Compute cov
Alpha = tf.matmul(K_u_inv, psi)
COV = kernel_tf(X_batch, X_batch, hyp[:-1]) - tf.matmul(tf.transpose(psi), tf.matmul(K_u_inv,psi)) + \
tf.matmul(tf.transpose(Alpha), tf.matmul(S,Alpha))
# Compute COV_inv
LL = tf.cholesky(COV + tf.eye(N, dtype=tf.float64)*sigma_n + tf.eye(N, dtype=tf.float64)*jitter)
COV_inv = tf.matrix_triangular_solve(tf.transpose(LL), tf.matrix_triangular_solve(LL, tf.eye(N, dtype=tf.float64), lower=True), lower=False)
# Compute cov(Z, X)
cov_ZX = tf.matmul(S,Alpha)
# Update m and S
alpha = tf.matmul(COV_inv, tf.transpose(cov_ZX))
m_new = m + tf.matmul(cov_ZX, tf.matmul(COV_inv, y_batch-MU))
S_new = S - tf.matmul(cov_ZX, alpha)
if monitor == False:
m_op = self.m.assign(m_new)
S_op = self.S.assign(S_new)
# Compute NLML
K_u_inv_m = tf.matmul(K_u_inv, m_new)
NLML = 0.5*tf.matmul(tf.transpose(m_new), K_u_inv_m) + tf.reduce_sum(tf.log(tf.diag_part(L))) + 0.5*np.log(2.*np.pi)*tf.cast(M, tf.float64)
train = self.optimizer.minimize(NLML)
nlml_op = self.nlml.assign(NLML[0,0])
return tf.group(*[train, m_op, S_op, nlml_op, K_u_inv_op])
def predict(self, X_star):
Z = self.sess.run(self.Z)
m = self.sess.run(self.m)
S = self.sess.run(self.S)
hyp = self.hyp
K_u_inv = self.sess.run(self.K_u_inv)
N_star = X_star.shape[0]
partitions_size = 10000
(number_of_partitions, remainder_partition) = divmod(N_star, partitions_size)
mean_star = np.zeros((N_star,1));
var_star = np.zeros((N_star,1));
for partition in range(0,number_of_partitions):
print("Predicting partition: %d" % (partition))
idx_1 = partition*partitions_size
idx_2 = (partition+1)*partitions_size
# Compute mu
psi = kernel(Z, X_star[idx_1:idx_2,:], hyp[:-1])
K_u_inv_m = np.matmul(K_u_inv,m)
mu = np.matmul(psi.T,K_u_inv_m)
mean_star[idx_1:idx_2,0:1] = mu;
# Compute cov
Alpha = np.matmul(K_u_inv,psi)
cov = kernel(X_star[idx_1:idx_2,:], X_star[idx_1:idx_2,:], hyp[:-1]) - \
np.matmul(psi.T, np.matmul(K_u_inv,psi)) + np.matmul(Alpha.T, np.matmul(S,Alpha))
var = np.abs(np.diag(cov))# + np.exp(hyp[-1])
var_star[idx_1:idx_2,0] = var
print("Predicting the last partition")
idx_1 = number_of_partitions*partitions_size
idx_2 = number_of_partitions*partitions_size + remainder_partition
# Compute mu
psi = kernel(Z, X_star[idx_1:idx_2,:], hyp[:-1])
K_u_inv_m = np.matmul(K_u_inv,m)
mu = np.matmul(psi.T,K_u_inv_m)
mean_star[idx_1:idx_2,0:1] = mu;
# Compute cov
Alpha = np.matmul(K_u_inv,psi)
cov = kernel(X_star[idx_1:idx_2,:], X_star[idx_1:idx_2,:], hyp[:-1]) - \
np.matmul(psi.T, np.matmul(K_u_inv,psi)) + np.matmul(Alpha.T, np.matmul(S,Alpha))
var = np.abs(np.diag(cov))# + np.exp(hyp[-1])
var_star[idx_1:idx_2,0] = var
return mean_star, var_star
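# Illustrative usage sketch (synthetic 1-D data; assumes TensorFlow 1.x and the
# Utilities helpers imported above):
#   X = np.random.uniform(size=(1000, 1))
#   y = np.sin(6.0 * X) + 0.1 * np.random.randn(1000, 1)
#   pgp = PGP(X, y, M=8, max_iter=500, N_batch=10)
#   pgp.train()
#   mean_star, var_star = pgp.predict(np.linspace(0, 1, 200)[:, None])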
| mit |
jat255/seaborn | seaborn/palettes.py | 4 | 28814 | from __future__ import division
import colorsys
from itertools import cycle
import numpy as np
import matplotlib as mpl
from .external import husl
from .external.six import string_types
from .external.six.moves import range
from .utils import desaturate, set_hls_values, get_color_cycle
from .xkcd_rgb import xkcd_rgb
from .crayons import crayons
SEABORN_PALETTES = dict(
deep=["#4C72B0", "#55A868", "#C44E52",
"#8172B2", "#CCB974", "#64B5CD"],
muted=["#4878CF", "#6ACC65", "#D65F5F",
"#B47CC7", "#C4AD66", "#77BEDB"],
pastel=["#92C6FF", "#97F0AA", "#FF9F9A",
"#D0BBFF", "#FFFEA3", "#B0E0E6"],
bright=["#003FFF", "#03ED3A", "#E8000B",
"#8A2BE2", "#FFC400", "#00D7FF"],
dark=["#001C7F", "#017517", "#8C0900",
"#7600A1", "#B8860B", "#006374"],
colorblind=["#0072B2", "#009E73", "#D55E00",
"#CC79A7", "#F0E442", "#56B4E9"]
)
class _ColorPalette(list):
"""Set the color palette in a with statement, otherwise be a list."""
def __enter__(self):
"""Open the context."""
from .rcmod import set_palette
self._orig_palette = color_palette()
set_palette(self)
return self
def __exit__(self, *args):
"""Close the context."""
from .rcmod import set_palette
set_palette(self._orig_palette)
def as_hex(self):
"""Return a color palette with hex codes instead of RGB values."""
hex = [mpl.colors.rgb2hex(rgb) for rgb in self]
return _ColorPalette(hex)
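# Illustrative usage (assumes seaborn imported as sns):
#   pal = sns.color_palette("deep")
#   pal.as_hex()[0]          # -> '#4c72b0'
#   with sns.color_palette("husl", 8):
#       pass                 # plots drawn inside use the temporary palette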
def color_palette(palette=None, n_colors=None, desat=None):
"""Return a list of colors defining a color palette.
Available seaborn palette names:
deep, muted, bright, pastel, dark, colorblind
Other options:
hls, husl, any named matplotlib palette, list of colors
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
Matplotlib palettes can be specified as reversed palettes by appending
"_r" to the name or as dark palettes by appending "_d" to the name.
(These options are mutually exclusive, but the resulting list of colors
can also be reversed).
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
palette : None, string, or sequence, optional
Name of palette or None to return current palette. If a sequence, input
colors are used but possibly cycled and desaturated.
n_colors : int, optional
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors,
but grabbing the current palette or passing in a list of colors will
not change the number of colors unless this is specified. Asking for
more colors than exist in the palette will cause it to cycle.
desat : float, optional
Proportion to desaturate each color by.
Returns
-------
palette : list of RGB tuples.
Color palette. Behaves like a list, but can be used as a context
manager and possesses an ``as_hex`` method to convert to hex color
codes.
See Also
--------
set_palette : Set the default color cycle for all plots.
set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the seaborn palettes.
Examples
--------
Show one of the "seaborn palettes", which have the same basic order of hues
as the default matplotlib color cycle but more attractive colors.
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.color_palette("muted"))
Use discrete values from one of the built-in matplotlib colormaps.
.. plot::
:context: close-figs
>>> sns.palplot(sns.color_palette("RdBu", n_colors=7))
Make a "dark" matplotlib sequential palette variant. (This can be good
when coloring multiple lines or points that correspond to an ordered
variable, where you don't want the lightest lines to be invisible).
.. plot::
:context: close-figs
>>> sns.palplot(sns.color_palette("Blues_d"))
Use a categorical matplotlib palette, add some desaturation. (This can be
good when making plots with large patches, which look best with dimmer
colors).
.. plot::
:context: close-figs
>>> sns.palplot(sns.color_palette("Set1", n_colors=8, desat=.5))
Use as a context manager:
.. plot::
:context: close-figs
>>> import numpy as np, matplotlib.pyplot as plt
>>> with sns.color_palette("husl", 8):
... _ = plt.plot(np.c_[np.zeros(8), np.arange(8)].T)
"""
if palette is None:
palette = get_color_cycle()
if n_colors is None:
n_colors = len(palette)
elif not isinstance(palette, string_types):
palette = palette
if n_colors is None:
n_colors = len(palette)
else:
if n_colors is None:
n_colors = 6
if palette == "hls":
palette = hls_palette(n_colors)
elif palette == "husl":
palette = husl_palette(n_colors)
elif palette.lower() == "jet":
raise ValueError("No.")
elif palette in SEABORN_PALETTES:
palette = SEABORN_PALETTES[palette]
elif palette in dir(mpl.cm):
palette = mpl_palette(palette, n_colors)
elif palette[:-2] in dir(mpl.cm):
palette = mpl_palette(palette, n_colors)
else:
raise ValueError("%s is not a valid palette name" % palette)
if desat is not None:
palette = [desaturate(c, desat) for c in palette]
# Always return as many colors as we asked for
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
# Always return in r, g, b tuple format
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = _ColorPalette(palette)
except ValueError:
raise ValueError("Could not generate a palette for %s" % str(palette))
return palette
def hls_palette(n_colors=6, h=.01, l=.6, s=.65):
"""Get a set of evenly spaced colors in HLS hue space.
h, l, and s should be between 0 and 1
Parameters
----------
n_colors : int
number of colors in the palette
h : float
first hue
l : float
lightness
s : float
saturation
Returns
-------
palette : seaborn color palette
List-like object of colors as RGB tuples.
See Also
--------
husl_palette : Make a palette using evenly spaced circular hues in the
HUSL system.
Examples
--------
Create a palette of 10 colors with the default parameters:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.hls_palette(10))
Create a palette of 10 colors that begins at a different hue value:
.. plot::
:context: close-figs
>>> sns.palplot(sns.hls_palette(10, h=.5))
Create a palette of 10 colors that are darker than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.hls_palette(10, l=.4))
Create a palette of 10 colors that are less saturated than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.hls_palette(10, s=.4))
"""
hues = np.linspace(0, 1, n_colors + 1)[:-1]
hues += h
hues %= 1
hues -= hues.astype(int)
palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]
return _ColorPalette(palette)
def husl_palette(n_colors=6, h=.01, s=.9, l=.65):
"""Get a set of evenly spaced colors in HUSL hue space.
h, s, and l should be between 0 and 1
Parameters
----------
n_colors : int
number of colors in the palette
h : float
first hue
s : float
saturation
l : float
lightness
Returns
-------
palette : seaborn color palette
List-like object of colors as RGB tuples.
See Also
--------
hls_palette : Make a palette using evenly spaced circular hues in the
HSL system.
Examples
--------
Create a palette of 10 colors with the default parameters:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.husl_palette(10))
Create a palette of 10 colors that begins at a different hue value:
.. plot::
:context: close-figs
>>> sns.palplot(sns.husl_palette(10, h=.5))
Create a palette of 10 colors that are darker than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.husl_palette(10, l=.4))
Create a palette of 10 colors that are less saturated than the default:
.. plot::
:context: close-figs
>>> sns.palplot(sns.husl_palette(10, s=.4))
"""
hues = np.linspace(0, 1, n_colors + 1)[:-1]
hues += h
hues %= 1
hues *= 359
s *= 99
l *= 99
palette = [husl.husl_to_rgb(h_i, s, l) for h_i in hues]
return _ColorPalette(palette)
def mpl_palette(name, n_colors=6):
"""Return discrete colors from a matplotlib palette.
Note that this handles the qualitative colorbrewer palettes
properly, although if you ask for more colors than a particular
qualitative palette can provide you will get fewer than you are
expecting. In contrast, asking for qualitative color brewer palettes
using :func:`color_palette` will return the expected number of colors,
but they will cycle.
If you are using the IPython notebook, you can also use the function
:func:`choose_colorbrewer_palette` to interactively select palettes.
Parameters
----------
name : string
Name of the palette. This should be a named matplotlib colormap.
n_colors : int
Number of discrete colors in the palette.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
Examples
--------
Create a qualitative colorbrewer palette with 8 colors:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.mpl_palette("Set2", 8))
Create a sequential colorbrewer palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.mpl_palette("Blues"))
Create a diverging palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.mpl_palette("seismic", 8))
Create a "dark" sequential palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.mpl_palette("GnBu_d"))
"""
brewer_qual_pals = {"Accent": 8, "Dark2": 8, "Paired": 12,
"Pastel1": 9, "Pastel2": 8,
"Set1": 9, "Set2": 8, "Set3": 12}
if name.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(name.replace("_d", "_r"), 2))
cmap = blend_palette(pal, n_colors, as_cmap=True)
else:
cmap = getattr(mpl.cm, name)
if name in brewer_qual_pals:
bins = np.linspace(0, 1, brewer_qual_pals[name])[:n_colors]
else:
bins = np.linspace(0, 1, n_colors + 2)[1:-1]
palette = list(map(tuple, cmap(bins)[:, :3]))
return _ColorPalette(palette)
def _color_to_rgb(color, input):
"""Add some more flexibility to color choices."""
if input == "hls":
color = colorsys.hls_to_rgb(*color)
elif input == "husl":
color = husl.husl_to_rgb(*color)
elif input == "xkcd":
color = xkcd_rgb[color]
return color
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
"""Make a sequential palette that blends from dark to ``color``.
This kind of palette is good for data that range between relatively
uninteresting low values and interesting high values.
The ``color`` parameter can be specified in a number of ways, including
all options for defining a color in matplotlib and several additional
color spaces that are handled by seaborn. You can also use the database
of named colors from the XKCD color survey.
If you are using the IPython notebook, you can also choose this palette
interactively with the :func:`choose_dark_palette` function.
Parameters
----------
color : base color for high values
hex, rgb-tuple, or html color name
n_colors : int, optional
number of colors in the palette
reverse : bool, optional
if True, reverse the direction of the blend
as_cmap : bool, optional
if True, return as a matplotlib colormap instead of list
input : {'rgb', 'hls', 'husl', 'xkcd'}
Color space to interpret the input color. The first three options
apply to tuple inputs and the latter applies to string inputs.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
light_palette : Create a sequential palette with bright low values.
diverging_palette : Create a diverging palette with two colors.
Examples
--------
Generate a palette from an HTML color:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.dark_palette("purple"))
Generate a palette that decreases in lightness:
.. plot::
:context: close-figs
>>> sns.palplot(sns.dark_palette("seagreen", reverse=True))
Generate a palette from an HUSL-space seed:
.. plot::
:context: close-figs
>>> sns.palplot(sns.dark_palette((260, 75, 60), input="husl"))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.dark_palette("#2ecc71", as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
color = _color_to_rgb(color, input)
gray = "#222222"
colors = [color, gray] if reverse else [gray, color]
return blend_palette(colors, n_colors, as_cmap)
def light_palette(color, n_colors=6, reverse=False, as_cmap=False,
input="rgb"):
"""Make a sequential palette that blends from light to ``color``.
This kind of palette is good for data that range between relatively
uninteresting low values and interesting high values.
The ``color`` parameter can be specified in a number of ways, including
all options for defining a color in matplotlib and several additional
color spaces that are handled by seaborn. You can also use the database
of named colors from the XKCD color survey.
If you are using the IPython notebook, you can also choose this palette
interactively with the :func:`choose_light_palette` function.
Parameters
----------
color : base color for high values
hex code, html color name, or tuple in ``input`` space.
n_colors : int, optional
number of colors in the palette
reverse : bool, optional
if True, reverse the direction of the blend
as_cmap : bool, optional
if True, return as a matplotlib colormap instead of list
input : {'rgb', 'hls', 'husl', 'xkcd'}
Color space to interpret the input color. The first three options
apply to tuple inputs and the latter applies to string inputs.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
dark_palette : Create a sequential palette with dark low values.
diverging_palette : Create a diverging palette with two colors.
Examples
--------
Generate a palette from an HTML color:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.light_palette("purple"))
Generate a palette that increases in lightness:
.. plot::
:context: close-figs
>>> sns.palplot(sns.light_palette("seagreen", reverse=True))
Generate a palette from an HUSL-space seed:
.. plot::
:context: close-figs
>>> sns.palplot(sns.light_palette((260, 75, 60), input="husl"))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.light_palette("#2ecc71", as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
color = _color_to_rgb(color, input)
light = set_hls_values(color, l=.95)
colors = [color, light] if reverse else [light, color]
return blend_palette(colors, n_colors, as_cmap)
def _flat_palette(color, n_colors=6, reverse=False, as_cmap=False,
input="rgb"):
"""Make a sequential palette that blends from gray to ``color``.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
n_colors : int, optional
number of colors in the palette
reverse : bool, optional
if True, reverse the direction of the blend
as_cmap : bool, optional
if True, return as a matplotlib colormap instead of list
Returns
-------
palette : list or colormap
See Also
--------
dark_palette : Create a sequential palette with dark low values.
"""
color = _color_to_rgb(color, input)
flat = desaturate(color, 0)
colors = [color, flat] if reverse else [flat, color]
return blend_palette(colors, n_colors, as_cmap)
def diverging_palette(h_neg, h_pos, s=75, l=50, sep=10, n=6, center="light",
as_cmap=False):
"""Make a diverging palette between two HUSL colors.
If you are using the IPython notebook, you can also choose this palette
interactively with the :func:`choose_diverging_palette` function.
Parameters
----------
h_neg, h_pos : float in [0, 359]
Anchor hues for negative and positive extents of the map.
s : float in [0, 100], optional
Anchor saturation for both extents of the map.
l : float in [0, 100], optional
Anchor lightness for both extents of the map.
n : int, optional
Number of colors in the palette (if not returning a cmap)
center : {"light", "dark"}, optional
Whether the center of the palette is light or dark
as_cmap : bool, optional
If true, return a matplotlib colormap object rather than a
list of colors.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
dark_palette : Create a sequential palette with dark values.
light_palette : Create a sequential palette with light values.
Examples
--------
Generate a blue-white-red palette:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.diverging_palette(240, 10, n=9))
Generate a brighter green-white-purple palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.diverging_palette(150, 275, s=80, l=55, n=9))
Generate a blue-black-red palette:
.. plot::
:context: close-figs
>>> sns.palplot(sns.diverging_palette(250, 15, s=75, l=40,
... n=9, center="dark"))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.diverging_palette(220, 20, sep=20, as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
palfunc = dark_palette if center == "dark" else light_palette
neg = palfunc((h_neg, s, l), 128 - (sep / 2), reverse=True, input="husl")
pos = palfunc((h_pos, s, l), 128 - (sep / 2), input="husl")
midpoint = dict(light=[(.95, .95, .95, 1.)],
dark=[(.133, .133, .133, 1.)])[center]
mid = midpoint * sep
pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)
return pal
def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"):
"""Make a palette that blends between a list of colors.
Parameters
----------
colors : sequence of colors in various formats interpreted by ``input``
hex code, html color name, or tuple in ``input`` space.
n_colors : int, optional
Number of colors in the palette.
as_cmap : bool, optional
If True, return as a matplotlib colormap instead of list.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
"""
colors = [_color_to_rgb(color, input) for color in colors]
name = "blend"
pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)
if not as_cmap:
pal = _ColorPalette(pal(np.linspace(0, 1, n_colors)))
return pal
def xkcd_palette(colors):
"""Make a palette with color names from the xkcd color survey.
See xkcd for the full list of colors: http://xkcd.com/color/rgb/
This is just a simple wrapper around the ``seaborn.xkcd_rgb`` dictionary.
Parameters
----------
colors : list of strings
List of keys in the ``seaborn.xkcd_rgb`` dictionary.
Returns
-------
palette : seaborn color palette
Returns the list of colors as RGB tuples in an object that behaves like
other seaborn color palettes.
See Also
--------
crayon_palette : Make a palette with Crayola crayon colors.
"""
palette = [xkcd_rgb[name] for name in colors]
return color_palette(palette, len(palette))
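# Illustrative usage (color names taken from the xkcd survey):
#   colors = ["windows blue", "amber", "faded green", "dusty purple"]
#   sns.palplot(sns.xkcd_palette(colors))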
def crayon_palette(colors):
"""Make a palette with color names from Crayola crayons.
Colors are taken from here:
http://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors
This is just a simple wrapper around the ``seaborn.crayons`` dictionary.
Parameters
----------
colors : list of strings
List of keys in the ``seaborn.crayons`` dictionary.
Returns
-------
palette : seaborn color palette
Returns the list of colors as RGB tuples in an object that behaves like
other seaborn color palettes.
See Also
--------
xkcd_palette : Make a palette with named colors from the XKCD color survey.
"""
palette = [crayons[name] for name in colors]
return color_palette(palette, len(palette))
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,
light=.85, dark=.15, reverse=False, as_cmap=False):
"""Make a sequential palette from the cubehelix system.
This produces a colormap with linearly-decreasing (or increasing)
brightness. That means that information will be preserved if printed to
black and white or viewed by someone who is colorblind. "cubehelix" is
also available as a matplotlib-based palette, but this function gives the
user more control over the look of the palette and has a different set of
defaults.
Parameters
----------
n_colors : int
Number of colors in the palette.
start : float, 0 <= start <= 3
The hue at the start of the helix.
rot : float
Rotations around the hue wheel over the range of the palette.
gamma : float 0 <= gamma
Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)
colors.
hue : float, 0 <= hue <= 1
Saturation of the colors.
dark : float 0 <= dark <= 1
Intensity of the darkest color in the palette.
light : float 0 <= light <= 1
Intensity of the lightest color in the palette.
reverse : bool
If True, the palette will go from dark to light.
as_cmap : bool
If True, return a matplotlib colormap instead of a list of colors.
Returns
-------
palette or cmap : seaborn color palette or matplotlib colormap
List-like object of colors as RGB tuples, or colormap object that
can map continuous values to colors, depending on the value of the
``as_cmap`` parameter.
See Also
--------
choose_cubehelix_palette : Launch an interactive widget to select cubehelix
palette parameters.
dark_palette : Create a sequential palette with dark low values.
light_palette : Create a sequential palette with bright low values.
References
----------
Green, D. A. (2011). "A colour scheme for the display of astronomical
intensity images". Bulletin of the Astromical Society of India, Vol. 39,
p. 289-295.
Examples
--------
Generate the default palette:
.. plot::
:context: close-figs
>>> import seaborn as sns; sns.set()
>>> sns.palplot(sns.cubehelix_palette())
Rotate backwards from the same starting location:
.. plot::
:context: close-figs
>>> sns.palplot(sns.cubehelix_palette(rot=-.4))
Use a different starting point and shorter rotation:
.. plot::
:context: close-figs
>>> sns.palplot(sns.cubehelix_palette(start=2.8, rot=.1))
Reverse the direction of the lightness ramp:
.. plot::
:context: close-figs
>>> sns.palplot(sns.cubehelix_palette(reverse=True))
Generate a colormap object:
.. plot::
:context: close-figs
>>> from numpy import arange
>>> x = arange(25).reshape(5, 5)
>>> cmap = sns.cubehelix_palette(as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
Use the full lightness range:
.. plot::
:context: close-figs
>>> cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True)
>>> ax = sns.heatmap(x, cmap=cmap)
"""
cdict = mpl._cm.cubehelix(gamma, start, rot, hue)
cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict)
x = np.linspace(light, dark, n_colors)
pal = cmap(x)[:, :3].tolist()
if reverse:
pal = pal[::-1]
if as_cmap:
x_256 = np.linspace(light, dark, 256)
if reverse:
x_256 = x_256[::-1]
pal_256 = cmap(x_256)
cmap = mpl.colors.ListedColormap(pal_256)
return cmap
else:
return _ColorPalette(pal)
def set_color_codes(palette="deep"):
"""Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like "b" or "g"
are interpreted by matplotlib in subsequent plots.
Parameters
----------
palette : {deep, muted, pastel, dark, bright, colorblind}
Named seaborn palette to use as the source of colors.
See Also
--------
set : Color codes can be set through the high-level seaborn style
manager.
set_palette : Color codes can also be set through the function that
sets the matplotlib color cycle.
Examples
--------
Map matplotlib color codes to the default seaborn palette.
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> import seaborn as sns; sns.set()
>>> sns.set_color_codes()
>>> _ = plt.plot([0, 1], color="r")
Use a different seaborn palette.
.. plot::
:context: close-figs
>>> sns.set_color_codes("dark")
>>> _ = plt.plot([0, 1], color="g")
>>> _ = plt.plot([0, 2], color="m")
"""
if palette == "reset":
        colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75),
                  (.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]
else:
colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]
for code, color in zip("bgrmyck", colors):
rgb = mpl.colors.colorConverter.to_rgb(color)
mpl.colors.colorConverter.colors[code] = rgb
mpl.colors.colorConverter.cache[code] = rgb
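# Illustrative usage sketch (not part of the original module); it only uses the
# public function defined above and assumes seaborn is importable as in the
# docstring examples.
#
#   import seaborn as sns
#   sns.set_color_codes("deep")    # "b", "g", ... now draw from the deep palette
#   sns.set_color_codes("reset")   # restore matplotlib's classic shorthand colors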
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/InnerOuterSchurTest/MHDallatonce.py | 4 | 9242 | import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import numpy as np
from dolfin import tic, toc
import HiptmairSetup
import PETScIO as IO
import scipy.sparse as sp
import matplotlib.pylab as plt
import MatrixOperations as MO
class BaseMyPC(object):
def setup(self, pc):
pass
def reset(self, pc):
pass
def apply(self, pc, x, y):
raise NotImplementedError
def applyT(self, pc, x, y):
self.apply(pc, x, y)
def applyS(self, pc, x, y):
self.apply(pc, x, y)
def applySL(self, pc, x, y):
self.applyS(pc, x, y)
def applySR(self, pc, x, y):
self.applyS(pc, x, y)
def applyRich(self, pc, x, y, w, tols):
self.apply(pc, x, y)
class InnerOuterWITHOUT2inverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
#MX = self.AA+self.F
MX = self.F # MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
kspMX.setOperators(MX,MX)
OptDB = PETSc.Options()
#OptDB["pc_factor_mat_ordering_type"] = "rcm"
#OptDB["pc_factor_mat_solver_package"] = "mumps"
kspMX.setFromOptions()
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
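        # Field-by-field (block triangular) application of the preconditioner:
        #   1. multiplier r : scalar solve with kspScalar
        #   2. pressure p   : xp = Q^{-1} * Fp * A^{-1} * x_p (pressure convection-diffusion style)
        #   3. magnetic b   : kspMX solve of (x_b - Dt*xr)
        #   4. velocity u   : kspF solve of (x_u - Ct*xb - Bt*xp)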
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
class InnerOuterMAGNETICinverse(BaseMyPC):
def __init__(self, W, kspF, kspA, kspQ,Fp,kspScalar, kspCGScalar, kspVector, G, P, A, Hiptmairtol,F):
self.W = W
self.kspF = kspF
self.kspA = kspA
self.kspQ = kspQ
self.Fp = Fp
self.kspScalar = kspScalar
self.kspCGScalar = kspCGScalar
self.kspVector = kspVector
# self.Bt = Bt
self.HiptmairIts = 0
self.CGits = 0
self.F = F
# print range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim())
# ss
self.P = P
self.G = G
self.AA = A
self.tol = Hiptmairtol
self.u_is = PETSc.IS().createGeneral(range(self.W[0].dim()))
self.b_is = PETSc.IS().createGeneral(range(self.W[0].dim(),self.W[0].dim()+self.W[1].dim()))
self.p_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()))
self.r_is = PETSc.IS().createGeneral(range(self.W[0].dim()+self.W[1].dim()+self.W[2].dim(),
self.W[0].dim()+self.W[1].dim()+self.W[2].dim()+self.W[3].dim()))
def create(self, pc):
print "Create"
def setUp(self, pc):
A, P = pc.getOperators()
print A.size
self.Ct = A.getSubMatrix(self.u_is,self.b_is)
self.C = A.getSubMatrix(self.b_is,self.u_is)
self.D = A.getSubMatrix(self.r_is,self.b_is)
self.Bt = A.getSubMatrix(self.u_is,self.p_is)
self.B = A.getSubMatrix(self.p_is,self.u_is)
self.Dt = A.getSubMatrix(self.b_is,self.r_is)
# print self.Ct.view()
#CFC = sp.csr_matrix( (data,(row,column)), shape=(self.W[1].dim(),self.W[1].dim()) )
#print CFC.shape
#CFC = PETSc.Mat().createAIJ(size=CFC.shape,csr=(CFC.indptr, CFC.indices, CFC.data))
#print CFC.size, self.AA.size
FF = self.F
# MO.StoreMatrix(B,"A")
# print FC.todense()
self.kspF.setOperators(FF,FF)
self.kspF.setType('preonly')
self.kspF.getPC().setType('lu')
self.kspF.setFromOptions()
self.kspF.setPCSide(0)
self.kspA.setType('preonly')
self.kspA.getPC().setType('lu')
self.kspA.setFromOptions()
self.kspA.setPCSide(0)
self.kspQ.setType('preonly')
self.kspQ.getPC().setType('lu')
self.kspQ.setFromOptions()
self.kspQ.setPCSide(0)
self.kspScalar.setType('preonly')
self.kspScalar.getPC().setType('lu')
self.kspScalar.setFromOptions()
self.kspScalar.setPCSide(0)
kspMX = PETSc.KSP()
kspMX.create(comm=PETSc.COMM_WORLD)
pcMX = kspMX.getPC()
kspMX.setType('preonly')
pcMX.setType('lu')
OptDB = PETSc.Options()
kspMX.setOperators(self.AA,self.AA)
self.kspMX = kspMX
# self.kspCGScalar.setType('preonly')
# self.kspCGScalar.getPC().setType('lu')
# self.kspCGScalar.setFromOptions()
# self.kspCGScalar.setPCSide(0)
self.kspVector.setType('preonly')
self.kspVector.getPC().setType('lu')
self.kspVector.setFromOptions()
self.kspVector.setPCSide(0)
print "setup"
def apply(self, pc, x, y):
br = x.getSubVector(self.r_is)
xr = br.duplicate()
self.kspScalar.solve(br, xr)
# print self.D.size
x2 = x.getSubVector(self.p_is)
y2 = x2.duplicate()
y3 = x2.duplicate()
xp = x2.duplicate()
self.kspA.solve(x2,y2)
self.Fp.mult(y2,y3)
self.kspQ.solve(y3,xp)
# self.kspF.solve(bu1-bu4-bu2,xu)
bb = x.getSubVector(self.b_is)
bb = bb - self.Dt*xr
xb = bb.duplicate()
self.kspMX.solve(bb,xb)
bu1 = x.getSubVector(self.u_is)
bu2 = self.Bt*xp
bu4 = self.Ct*xb
XX = bu1.duplicate()
xu = XX.duplicate()
self.kspF.solve(bu1-bu4-bu2,xu)
#self.kspF.solve(bu1,xu)
y.array = (np.concatenate([xu.array, xb.array,xp.array,xr.array]))
def ITS(self):
return self.CGits, self.HiptmairIts , self.CGtime, self.HiptmairTime
| mit |
spookysys/turbopump | pump/lobanoff/vane_and_shroud_thickness.py | 1 | 2453 | """Figure 3-11: Recommended minimum impeller vane and shroud thickness for castability (standard cast materials) from D2"""
from __future__ import print_function
from itertools import chain
import numpy as np
from matplotlib import pyplot as plt
from numpy.polynomial import polynomial
from utils import memoized, polyfit2d
from units import ureg
from pump.lobanoff._data import _data as _lobanoff_data
def _data():
return _lobanoff_data()['vane_and_shroud_thickness']
@memoized
def get_limits():
"""Return the limits for D2"""
vanes = list(chain.from_iterable(
x['D2'] for x in _data()
))
return min(vanes), max(vanes)
def _points(part):
diameters, values = [], []
    for entry in _data():
        p = entry['D2']
        dias = [p[0], np.average(p), p[-1]]
        diameters.extend(dias)
        values.extend([entry[part]] * len(dias))
return diameters, values
@memoized
def get_coeffs(part, point):
"""Polynomial coefficients relating D2 to vane/shroud thickness at different points"""
diameters, values = _points(part)
values = [x[point] for x in values]
return np.polyfit(diameters, values, 2)
def plot(part):
plt.figure()
# Plot data
x, y = _points(part)
plt.plot(x, y, 'r.')
# Plot fitted curve
x = np.linspace(*get_limits())
for point in range(0, len(y[0])):
y = np.polyval(get_coeffs(part, point), x)
plt.plot(x, y, 'g-')
plt.gca().set_title(part)
plt.gca().set_xticks(np.arange(0, get_limits()[1] + get_limits()[0] + 1, 10))
plt.gca().set_yticks(np.arange(0, 1, 1. / 16))
plt.gca().set_xlim(0, get_limits()[1] + get_limits()[0])
plt.gca().set_ylim(0, 1)
plt.minorticks_on()
plt.grid()
plt.xlabel('D2 - Impeller diameter [inch]')
    plt.ylabel(part + ' thickness [inch]')
plt.draw()
if __name__ == '__main__':
plot('vane')
plot('shroud')
plt.show()
@ureg.wraps('inch', ('inch'))
def calc_vane_thickness(D2, point):
"""Minimum vane thickness, 0: outlet, 1:middle, 2:inlet"""
startpoint, endpoint = get_limits()
assert startpoint < D2 < endpoint
return np.polyval(get_coeffs('vane', point), D2)
@ureg.wraps('inch', ('inch'))
def calc_shroud_thickness(D2, point):
"""Minimum back shroud thickness, 0:t1 at outlet, 1:t2 at 3/4*D2"""
startpoint, endpoint = get_limits()
assert startpoint < D2 < endpoint
return np.polyval(get_coeffs('shroud', point), D2)
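# Illustrative usage sketch (not part of the original module); the impeller
# diameter below is a made-up value and must lie inside get_limits().
#
#   from units import ureg
#   D2 = 10.0 * ureg.inch
#   t_vane = calc_vane_thickness(D2, point=0)      # 0: outlet, 1: middle, 2: inlet
#   t_shroud = calc_shroud_thickness(D2, point=0)  # 0: t1 at outlet, 1: t2 at 3/4*D2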
| gpl-3.0 |
icdishb/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
mayblue9/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
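# (the helper below evaluates mean_i log(1 + exp(-y_i * (x_i . w + intercept)))
#  + ||w||^2 / (2 * C * n_samples), i.e. the averaged log-loss plus the scaled
#  L2 penalty)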
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
Kyle-Crypton/CATL_Project | decisiontree.py | 1 | 1533 | import pydot
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals.six import StringIO
from db_operation import db_exec
from decisiontree_extracting import tree_to_code, tree_to_code_db
def DSTree(X, y, feature_names, class_names):
# Undefined input variable
package_name = 'EPP-14-225'
print 'Generating Random Forest Classifier...'
clf = RandomForestClassifier(n_estimators = 4)
clf = clf.fit(X, y)
# print 'Different importances of each features: %s' % clf.feature_importances_
print 'Removing the old package result...'
sql = 'delete from dsresult where package=\'{}\''.format(package_name)
db_exec('catl', sql)
print 'Generating Tree plot...'
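    # For each tree in the forest: extract its decision rules into the database
    # via tree_to_code_db, then export the tree to graphviz dot and render a PNG.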
for i in xrange(len(clf.estimators_)):
dot_data = StringIO()
# result_filename = 'classifying_result/classifying-{}.txt'.format(i)
# tree_to_code(clf.estimators_[i], feature_names = feature_names, class_names = class_names, result_filename = result_filename)
tree_to_code_db(clf.estimators_[i], feature_names = feature_names, class_names = class_names, package_name = package_name)
print 'Generating %d plot...' % i
tree.export_graphviz(clf.estimators_[i], out_file = dot_data, feature_names = feature_names, class_names = class_names, filled = True, rounded = True, special_characters = True, leaves_parallel = True)
        print 'Converting %d plot...' % i
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_png('classifying_result/classifying-%d.png' % i)
if __name__ == '__main__':
main() | mit |
stephenliu1989/HK_DataMiner | hkdataminer/cluster/aplod_.py | 1 | 17907 | __author__ = 'LIU Song <liusong299@gmail.com>'
__contributors__ = "Lizhe ZHU, Tiago Lobato Gimenes, Xuhui HUANG"
__version__ = "0.91"
# Copyright (c) 2016, Hong Kong University of Science and Technology (HKUST)
# All rights reserved.
# ===============================================================================
# GLOBAL IMPORTS:
import random
import operator
import time
import numpy as np
from multiprocessing import Pool
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.utils.validation import check_is_fitted
from metrics.pairwise import pairwise_distances
# ===============================================================================
# LOCAL IMPORTS:
#import knn as knnn
try:
    import faiss  # used by FaissNearestNeighbors below
except ImportError:
    faiss = None  # assumption: faiss is optional and only needed for that helper
# ===============================================================================
def FaissNearestNeighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True, GPU=False):
dimension = X.shape[1]
if GPU is True:
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
res = faiss.StandardGpuResources() # use a single GPU
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
# make it an IVF GPU index
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
            assert not index_gpu.is_trained
index_gpu.train(X)
assert index_gpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
index_gpu.add(X)
n_samples = 10
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_gpu.search(X[samples], k) # sanity check
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_gpu.search(X[samples], k)
# print(np.max(D[:, k - 1]), k, eps)
index_gpu.nprobe = nprobe
D, I = index_gpu.search(X, k) # actual search
else:
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
assert not index_cpu.is_trained
index_cpu.train(X)
assert index_cpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
index_cpu.add(X)
n_samples = 10
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_cpu.search(X[samples], k) # sanity check
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_cpu.search(X[samples], k)
# print(np.max(D[:, k - 1]), k, eps)
index_cpu.nprobe = nprobe
D, I = index_cpu.search(X, k) # actual search
if return_distance is True:
return D, I
else:
return D
def run_knn(X, n_neighbors=100, n_samples=1000, metric='rmsd', algorithm='vp_tree'):
# X = check_array(X, accept_sparse='csr')
#print "Calculating pairwise ", metric, " distances of ", n_samples, " samples..."
t0 = time.time()
if metric is "rmsd":
samples = random.sample(X, n_samples)
whole_samples= reduce(operator.add, (samples[i] for i in xrange(len(samples))))
else:
whole_samples = random.sample(X, n_samples)
sample_dist_metric = pairwise_distances( whole_samples, whole_samples, metric=metric )
t1 = time.time()
#print "time:", t1-t0,
#print "Done."
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later
#print "Calculating knn..."
t0 = time.time()
if metric is 'rmsd':
shape_x = np.shape(X.xyz)
knn = knnn.vp_tree_parallel( np.reshape(X.xyz, (shape_x[0] * shape_x[1] * shape_x[2])), shape_x[1] * 3, "rmsd_serial" )
distances_, indices = knn.query( np.linspace(0, len(X.xyz)-1, len(X.xyz), dtype='int'), n_neighbors )
else:
if algorithm is 'vp_tree':
shape_x = np.shape(X)
#print "shape_x:", shape_x
knn = knnn.vp_tree_parallel( np.reshape(X, (shape_x[0] * shape_x[1])), shape_x[1], "euclidean_serial" )
distances_, indices = knn.query( np.linspace(0, len(X)-1, len(X), dtype='int'), n_neighbors )
else:
neighbors_model = NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, metric=metric)
neighbors_model.fit(X)
distances_, indices = neighbors_model.kneighbors(X, n_neighbors=n_neighbors, return_distance=True)
t1 = time.time()
#print "time:", t1-t0,
#print "Done."
# Calculate distance between sample, and find dc
# np.savetxt("./sample_dist_metric.txt", sample_dist_metric, fmt="%f")
#np.savetxt("./distances_.txt", distances_, fmt="%f")
#np.savetxt("./indices.txt", indices, fmt="%d")
return sample_dist_metric, distances_, indices
def calculate_rho(X_len, n_neighbors, dc_2, indices, distances_):
rho = np.zeros(X_len, dtype=np.float32)
for i in xrange(0, X_len):
for j in xrange(0,n_neighbors):
index = indices[i,j]
dist = distances_[i,j]
gaussian = np.math.exp(-(dist**2/dc_2))
rho[i] += gaussian
            rho[index] += gaussian
return rho
def aplod_clustering(X, weight=None, rho_cutoff=1.0, delta_cutoff=1.0, percent=0.1, n_neighbors=100, n_samples=1000,
metric='rmsd', algorithm='auto', sample_dist_metric=None, distances_=None, indices=None, parallel=True):
"""Adaptive Partitioning by Local Density-peaks (APLoD)
We present an efficient density-based adaptive-resolution clustering method APLoD
for analyzing large-scale molecular dynamics (MD) trajectories. APLoD performs the
k-nearest-neighbors search to estimate the density of MD conformations in a local fashion,
which can group MD conformations in the same high-density region into a cluster.
APLoD greatly improves the popular density peaks algorithm by reducing the running
time and the memory usage by 2-3 orders of magnitude for systems ranging from alanine
dipeptide to a 370-residue Maltose-binding protein. In addition, we demonstrate that
APLoD can produce clusters with various sizes that are adaptive to the underlying density
(i.e. larger clusters at low density regions, while smaller clusters at high density regions),
which is a clear advantage over other popular clustering algorithms including k-centers and
k-medoids. We anticipate that APLoD can be widely applied to split ultra-large MD datasets
containing millions of conformations for subsequent construction of Markov State Models.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
rho_cutoff : float, optional
The cut-off of the local density rho
delta_cutoff : float, optional
The cut-off of the distance delta from points of higher density
percent : float, optional
Average percentage of neighbours
n_neighbors : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
Returns
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
----------
NONE
References
----------
Song Liu, Lizhe Zhu and Xuhui Huang, "Adaptive Partitioning by Local
    Density-peaks (APLoD): An efficient density-based clustering algorithm
for analyzing molecular dynamics trajectories". Journal of Computational
Chemistry XX/XX/2016: Vol.XXX No. XXX pp. XXXX-XXXX
DOI: XXXX
Alex Rodriguez, Alessandro Laio, "Clustering by fast search and find
of APLoDs". Science 27 June 2014: Vol. 344 no. 6191 pp. 1492-1496
DOI: 10.1126/science.1242072
"""
if rho_cutoff < 0.0:
raise ValueError("rho must be non negative!")
if delta_cutoff < 0.0 and not delta_cutoff == None:
raise ValueError("delta must be non negative!")
if algorithm is not "precomputed":
sample_dist_metric, distances_, indices = run_knn(X=X, n_neighbors=n_neighbors, n_samples=n_samples, metric=metric, algorithm=algorithm)
else:
if sample_dist_metric is None:
raise ValueError("sample_dist_metric must not be None!")
if distances_ is None:
raise ValueError("distances must not be None!")
if indices is None:
raise ValueError("indices must not be None!")
# Calculate distance between sample, and find dc
sample_dist = []
for i in xrange(0, n_samples):
for j in xrange(i+1, n_samples):
sample_dist.append(sample_dist_metric[i, j])
sorted_sample_dist=np.sort(sample_dist)
index = int(round(n_samples*percent))
dc = sorted_sample_dist[index]
# ----Cal rho: Gaussian kernel with neighbors
# Calculating Rho using Gaussian kernel.
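    # rho[i] = sum over the k nearest neighbours j of exp(-dist(i, j)**2 / dc**2),
    # accumulated symmetrically for both i and j (scaled by weight[j] when given).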
X_len = len(X)
rho = np.zeros(X_len, dtype=np.float32)
dc_2 = dc**2
if weight is None:
#Don't have a weight
for i in xrange(0, X_len):
for j in xrange(0,n_neighbors):
index = indices[i,j]
dist = distances_[i,j]
gaussian = np.math.exp(-(dist**2/dc_2))
rho[i] += gaussian
rho[index] += gaussian
else:
for i in xrange(0, X_len):
for j in xrange(0,n_neighbors):
index = indices[i,j]
dist = distances_[i,j]
gaussian = np.math.exp(-(dist**2/dc_2)) * weight[j]
rho[i] += gaussian
rho[index] += gaussian
# Calculating Highest Rho and Max Distance.
rho_argsorted = np.argsort(rho)[::-1]
max_rho = rho[rho_argsorted[0]]
max_dist = np.max(distances_)
if delta_cutoff == None:
delta_cutoff = max_dist
delta = [max_dist for i in xrange(X_len)]
nneigh = [i for i in xrange(X_len)]
# Calculating Delta.
for i in range(0, X_len):
for j in range(0, n_neighbors):
index = indices[i, j]
if rho[index] > rho[i]:
delta[i] = distances_[i, j]
nneigh[i] = index
break
# Clustering Data.
# Initially, all samples are noise.
rho_cut_index = int(1.0 * X_len) - 1
rho_cut = rho[rho_argsorted[rho_cut_index]]
distances_sorted = np.sort(distances_, axis=None)
distances_index = int(1.0 * len(distances_sorted)) - 1
delta_cut = distances_sorted[distances_index]
#print "rho_cut:", rho_cut, rho_cut_index
#print "delta_cut", delta_cut, distances_index
if metric is "rmsd:":
labels_ = -np.ones(X.xyz.shape[0], dtype=np.intp)
else:
labels_ = -np.ones(X_len, dtype=np.intp)
cluster_centers_list = []
cluster = 0
for i in range(len(rho_argsorted)):
index = rho_argsorted[i]
if labels_[index] == -1 and delta[index] >= delta_cut and rho[index] > rho_cut:
labels_[index] = cluster
cluster_centers_list.append(index)
cluster += 1
else:
if labels_[index] == -1 and labels_[nneigh[index]] != -1:
labels_[index] = labels_[nneigh[index]]
cluster_centers_ = cluster_centers_list
return cluster_centers_, labels_
class APLoD(BaseEstimator, ClusterMixin):
"""Adaptive Partitioning by Local Density-peaks (APLoD)
We present an efficient density-based adaptive-resolution clustering method APLoD
for analyzing large-scale molecular dynamics (MD) trajectories. APLoD performs the
k-nearest-neighbors search to estimate the density of MD conformations in a local fashion,
which can group MD conformations in the same high-density region into a cluster.
APLoD greatly improves the popular density peaks algorithm by reducing the running
time and the memory usage by 2-3 orders of magnitude for systems ranging from alanine
dipeptide to a 370-residue Maltose-binding protein. In addition, we demonstrate that
APLoD can produce clusters with various sizes that are adaptive to the underlying density
(i.e. larger clusters at low density regions, while smaller clusters at high density regions),
which is a clear advantage over other popular clustering algorithms including k-centers and
k-medoids. We anticipate that APLoD can be widely applied to split ultra-large MD datasets
containing millions of conformations for subsequent construction of Markov State Models.
Parameters
----------
rho_cutoff : float, optional
The cut-off of the local density rho
delta_cutoff : float, optional
The cut-off of the distance delta from points of higher density
percent : float, optional
Average percentage of neighbours
n_neighbors : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
Returns
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
----------
NONE
References
----------
Song Liu, Lizhe Zhu and Xuhui Huang, "Adaptive Partitioning by Local
    Density-peaks (APLoD): An efficient density-based clustering algorithm
for analyzing molecular dynamics trajectories". Journal of Computational
Chemistry XX/XX/2016: Vol.XXX No. XXX pp. XXXX-XXXX
DOI: XXXX
Alex Rodriguez, Alessandro Laio, "Clustering by fast search and find
of APLoDs". Science 27 June 2014: Vol. 344 no. 6191 pp. 1492-1496
DOI: 10.1126/science.1242072
"""
def __init__(self, rho_cutoff=1.0, delta_cutoff=1.0, percent=0.2, n_neighbors=100, n_samples=1000,
metric='rmsd', algorithm='auto', sample_dist_metric=None, distances_=None, indices=None, parallel=True):
self.rho_cutoff = rho_cutoff
self.delta_cutoff = delta_cutoff
self.percent = percent
self.n_neighbors = n_neighbors
self.n_samples = n_samples
self.metric = metric
self.algorithm = algorithm
self.sample_dist_metric = sample_dist_metric
self.distances_ = distances_
self.indices = indices
self.parallel = parallel
def fit(self, X, weight=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
# X = check_array(X)
print( "Doing APLoD clustering..." )
t0 = time.time()
self.cluster_centers_, self.labels_ = \
aplod_clustering(X, weight=weight, rho_cutoff=self.rho_cutoff, delta_cutoff=self.delta_cutoff,
percent=self.percent, n_neighbors=self.n_neighbors,n_samples=self.n_samples,
metric=self.metric, algorithm=self.algorithm,sample_dist_metric=self.sample_dist_metric,
distances_=self.distances_, indices=self.indices, parallel=self.parallel)
t1 = time.time()
print( "APLoD Time Cost:", t1-t0 )
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
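# Illustrative usage sketch (not part of the original module); the toy data and
# parameter values below are made up, and metric/algorithm are set to the plain
# Euclidean k-NN path rather than the RMSD/vp_tree default.
#
#   import numpy as np
#   X = np.random.rand(100000, 2).astype(np.float32)
#   aplod = APLoD(metric='euclidean', algorithm='auto',
#                 n_neighbors=50, n_samples=1000, percent=0.2)
#   aplod.fit(X)
#   labels = aplod.labels_            # cluster label per conformation (-1 = unassigned)
#   centers = aplod.cluster_centers_  # indices of the local density-peak frames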
| apache-2.0 |
bgoodr/how-to | python/python_matplotlib/gantt_chart_basic.py | 1 | 3472 | # -*- mode: python; -*-
# Execute this script using Bash script of the same name but without a file
# extension. Use ../pdbwrapper/pdbwrapper instead of pdb for debugging.
"""
GANTT Chart with Matplotlib
Sukhbinder
Inspired from
http://www.clowersresearch.com/main/gantt-charts-in-matplotlib/
"""
import os
import sys
import argparse
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.dates
from matplotlib.dates import WEEKLY, DateFormatter, rrulewrapper, RRuleLocator
import numpy as np
def _create_date(datetxt):
"""Creates the date"""
day, month, year = datetxt.split('-')
date = dt.datetime(int(year), int(month), int(day))
mdate = matplotlib.dates.date2num(date)
return mdate
def CreateGanttChart(fname):
"""Create gantt charts with matplotlib
Give file name.
"""
ylabels = []
customDates = []
textlist = open(fname).readlines()
for tx in textlist:
if not tx.startswith('#'):
ylabel, startdate, enddate = tx.split(',')
ylabels.append(ylabel.replace('\n', ''))
customDates.append([_create_date(startdate.replace('\n', '')), _create_date(enddate.replace('\n', ''))])
ilen = len(ylabels)
pos = np.arange(0.5, ilen * 0.5 + 0.5, 0.5)
task_dates = {}
for i, task in enumerate(ylabels):
task_dates[task] = customDates[i]
fig = plt.figure(figsize=(20, 8))
ax = fig.add_subplot(111)
for i in range(len(ylabels)):
start_date, end_date = task_dates[ylabels[i]]
ax.barh((i * 0.5) + 0.5, end_date - start_date, left=start_date, height=0.3, align='center', edgecolor='lightgreen', color='orange', alpha=0.8)
locsy, labelsy = plt.yticks(pos, ylabels)
plt.setp(labelsy, fontsize=14)
# ax.axis('tight')
ax.set_ylim(ymin=-0.1, ymax=ilen * 0.5 + 0.5)
ax.grid(color='g', linestyle=':')
ax.xaxis_date()
rule = rrulewrapper(WEEKLY, interval=1)
loc = RRuleLocator(rule)
# formatter = DateFormatter("%d-%b '%y")
formatter = DateFormatter("%d-%b")
ax.xaxis.set_major_locator(loc)
ax.xaxis.set_major_formatter(formatter)
labelsx = ax.get_xticklabels()
plt.setp(labelsx, rotation=30, fontsize=10)
font = font_manager.FontProperties(size='small')
ax.legend(loc=1, prop=font)
ax.invert_yaxis()
fig.autofmt_xdate()
plt.savefig(fname + '.svg')
plt.show()
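# Illustrative input-file sketch (not part of the original script): each
# non-comment line is "<label>,<start>,<end>" with dates in day-month-year form,
# matching the split(',') and _create_date parsing above. The task names and
# dates below are made up.
#
#   # projectChart.txt
#   Design,01-03-2016,15-03-2016
#   Implementation,10-03-2016,30-04-2016
#   Testing,20-04-2016,15-05-2016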
def script_base():
'The basename of the script file for usage.'
return os.path.basename(os.path.splitext(sys.argv[0])[0])
description = r"""
{script} -- Basic Gantt Chart
Demonstrate the code at https://sukhbinder.wordpress.com/2016/05/10/quick-gantt-chart-with-matplotlib/
Example:
{script} -f projectChart.txt
""".format(script=script_base())
def main():
"""
Main function
"""
# Parse command-line arguments:
# https://docs.python.org/2/howto/argparse.html
parser = argparse.ArgumentParser(
prog=os.path.basename(os.path.splitext(sys.argv[0])[0]), # Avoid showing the .py file extension in the usage help
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-f', '--project_file', help='Path to project file.', required=True)
args = parser.parse_args(sys.argv[1:])
CreateGanttChart(args.project_file)
if __name__ == '__main__':
sys.exit(0 if main() else 1) # Return non-zero exit codes upon failure
| mit |
ninotoshi/tensorflow | tensorflow/contrib/learn/python/learn/estimators/rnn.py | 1 | 8531 | """Recurrent Neural Network estimators."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
from tensorflow.contrib.learn.python.learn import models
def null_input_op_fn(X):
"""This function does no transformation on the inputs, used as default"""
return X
class TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
"""TensorFlow RNN Classifier model.
Parameters:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument X for input and returns transformed X.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation is
performed. This saves computational time when unrolling past max sequence
length.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam", "Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are
supposed to have weight one.
continue_training: when continue_training is True, once initialized
      model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
"""
def __init__(self,
rnn_size,
n_classes,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
class_weight=None,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
super(TensorFlowRNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
class_weight=class_weight,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, X, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.logistic_regression,
self.sequence_length, self.initial_state)(X, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_tensor_value('logistic_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_tensor_value('logistic_regression/weights')
class TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
"""TensorFlow RNN Regressor model.
Parameters:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument X for input and returns transformed X.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation is
performed. This saves computational time when unrolling past max sequence
length.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam", "Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
continue_training: when continue_training is True, once initialized
      model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
"""
def __init__(self,
rnn_size,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
n_classes=0,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
super(TensorFlowRNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, X, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.linear_regression, self.sequence_length,
self.initial_state)(X, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_tensor_value('linear_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_tensor_value('linear_regression/weights')
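# Illustrative usage sketch (not part of the original module); the shapes,
# hyper-parameters and the input_op_fn below are made up. input_op_fn must turn
# the input tensor into the per-step representation expected by
# models.get_rnn_model (see the class docstrings above).
#
#   import numpy as np
#   import tensorflow as tf
#
#   def split_input_op_fn(X):
#       # split a [batch, n_steps, n_features] tensor into a list of
#       # n_steps tensors of shape [batch, n_features]
#       return [tf.squeeze(t, [1]) for t in tf.split(1, 5, X)]
#
#   classifier = TensorFlowRNNClassifier(rnn_size=32, n_classes=2,
#                                        cell_type='gru', num_layers=1,
#                                        input_op_fn=split_input_op_fn,
#                                        steps=500, batch_size=32)
#   classifier.fit(np.random.rand(200, 5, 8), np.random.randint(0, 2, 200))
#   predictions = classifier.predict(np.random.rand(20, 5, 8))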
| apache-2.0 |
nrhine1/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 30
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
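# A minimal usage sketch (added for illustration; the variable names below are
# hypothetical and not part of the module): ``spectral_clustering`` expects a
# symmetric affinity matrix, which can for instance be built with an RBF
# kernel before calling the function.
#
#     from sklearn.metrics.pairwise import rbf_kernel
#     X_demo = np.random.RandomState(0).rand(30, 2)
#     affinity_demo = rbf_kernel(X_demo, gamma=1.0)   # symmetric, non-negative
#     demo_labels = spectral_clustering(affinity_demo, n_clusters=3,
#                                       random_state=0)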
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the Euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
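# Illustrative sketch (an assumption-laden example, not part of the original
# module): the Notes above describe turning a distance matrix into an affinity
# suitable for ``affinity='precomputed'`` with the Gaussian (RBF, heat) kernel.
#
#     rng = np.random.RandomState(0)
#     a = rng.rand(20, 1)
#     D = np.abs(a - a.T)                  # toy symmetric distance matrix
#     delta = D.std()
#     S = np.exp(- D ** 2 / (2. * delta ** 2))
#     labels = SpectralClustering(n_clusters=2,
#                                 affinity='precomputed').fit(S).labels_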
| bsd-3-clause |
reedessick/pointy-Poisson | concentrationMultiPopVectors.py | 1 | 3445 | #!/usr/bin/python
usage = "concentrationMultiPopVectors.py [--options] vectors.txt"
description = "a tool to make concentration diagrams of data stored in vectors.txt"
author = "reed.essick@ligo.org"
import numpy as np
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
plt.rcParams['text.usetex'] = True
from optparse import OptionParser
#-------------------------------------------------
parser = OptionParser(usage=usage, description=description)
parser.add_option("-v", "--verbose", default=False, action="store_true")
parser.add_option('-g', '--giniThr', default=0.9, type='float', help='a threshold on the gini index. Vectors with larger gini indexes are reported')
parser.add_option('', '--nbins', default=20, type='int', help='the number of bins for the gini index histogram')
parser.add_option("-o", "--output-dir", default=".", type="string")
parser.add_option("-t", "--tag", default="", type="string")
opts, args = parser.parse_args()
if opts.tag:
opts.tag = "_%s"%(opts.tag)
if len(args)!=1:
raise ValueError("Please supply exactly one input argument\n%s"%(usage))
vectors = args[0]
#-------------------------------------------------
if opts.verbose:
print "reading vectors from : %s"%(vectors)
### extract headers
file_obj = open(vectors, "r")
chans = file_obj.readline().strip().split()[1:] ### skip first column because that is the filename
Nchans = len(chans)
if opts.verbose:
print " found %d channels"%(Nchans)
### load vectors
vects = []
names = []
for line in file_obj:
line = line.strip().split()
if len(line) != Nchans+1:
raise ValueError("inconsistent data format in : %s"%(vectors))
vects.append( [float(_) for _ in line[1:]] )
names.append( line[0] )
vects = np.array(vects)
if opts.verbose:
print " found %d samples"%(len(vects))
#-------------------------------------------------
### plot concentration diagrams and histogram of gini indexes
if opts.verbose:
print "plotting concentration diagrams"
fig = plt.figure(figsize=(10,6))
axC = plt.subplot(1,2,1)
axH = plt.subplot(1,2,2)
x = (np.arange(Nchans)+1.0)/Nchans
dx = x[1]-x[0]
gini = []
keepers = []
for name, vect in zip(names, vects):
if np.any(vect):
y = vect[:]
y.sort()
y = y[::-1]
y = np.cumsum( y**2 )
y /= y[-1]
else:
y = x
axC.plot( x, y, color='b', alpha=0.5 )
g = ( (np.sum( (y[1:]+y[:-1])*0.5 ) + 0.5*y[0]) * dx - 0.5 )*2
if g!=g: raise ValueError
	gini.append( g ) ### approximate the integral with trapezoids and subtract off half for gini index
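	# The line computing ``g`` above is g = 2 * (A - 1/2), where A is a
	# trapezoid-rule approximation of the area under the concentration curve
	# y(x). Rough sanity check: a vector dominated by a single channel gives
	# y ~= 1 everywhere, so A ~= 1 and g ~= 1, while a perfectly even vector
	# gives y ~= x, A ~= 1/2 and g ~= 0.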
if g >= opts.giniThr:
keepers.append( name )
gini = np.array(gini)
bins = np.logspace( np.log10(1-max(gini)), 0, opts.nbins+1)
axH.hist( 1.0-gini, bins=bins )
### decorate
axC.set_xscale('log')
axC.set_xlabel('fraction of vector (len=%d)'%Nchans)
axC.set_ylabel('fraction of norm')
axH.set_xscale("log")
axH.set_xlabel('1 - (gini index)')
axH.set_ylabel('count')
axC.plot([0,1], [0,1], 'k--')
### save
figname = "%s/concentration%s.png"%(opts.output_dir, opts.tag)
if opts.verbose:
print figname
fig.savefig(figname)
plt.close(fig)
### write keepers to file
filename = "%s/concentration%s.out"%(opts.output_dir, opts.tag)
if opts.verbose:
print "writing large gini names to : %s"%filename
file_obj = open(filename, "w")
for name in sorted(keepers):
print >> file_obj, name
file_obj.close()
| mit |
TomAugspurger/dota | dota/tests/test_scripts.py | 1 | 9003 | # -*- coding: utf-8 -*-
import pathlib
import unittest
from unittest.mock import patch
from os.path import expanduser
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import requests
from pandas.util.testing import network
from numpy import nan
import pandas as pd
from pandas import Timestamp
import pandas.util.testing as tm
from dota.api import API, HistoryResponse, DetailsResponse
from dota.scripts import get_details_by_id
import dota.scripts.parsers as p
import dota.scripts.json2hdf5 as h5
class TestGetByID(unittest.TestCase):
def setUp(self):
# Make some fake cached games
pathlib.Path('details12345678.json').touch()
pathlib.Path('1234.json').touch()
def test_argparser(self):
args = ['1234']
args = get_details_by_id.parser.parse_args(args)
steam_id, key_path, data_dir = get_details_by_id.argparser(args)
self.assertEqual(steam_id, 1234)
self.assertEqual(key_path,
pathlib.Path(expanduser('~/Dropbox/bin/api-keys.txt')))
self.assertEqual(data_dir,
pathlib.Path(expanduser('~/sandbox/dota/data/')))
def test_get_details(self):
# make fake new game 12345
# TODO: mock out HR / DR construction as well?
hr = HistoryResponse({'status': 1,
'results_remaining': 0,
'num_results': 1,
'total_results': 1,
'matches': [{'match_id': 12345}]})
dr = DetailsResponse({'radiant_win': 1,
'match_id': 12345,
'players': [],
'negative_votes': 1,
'positive_votes': 1,
'lobby_type': 1,
'duration': 1,
'first_blood_time': 1,
'leagueid': 1,
'start_time': 1,
'dire_name': 1,
'radiant_name': 1,
'dire_team_id': 1,
'radiant_team_id': 1})
with patch.object(API, 'get_match_history', return_value=hr):
with patch.object(API, 'get_match_details', return_value=dr):
get_details_by_id.get_details(steam_id=1, key='fake',
data_dir=pathlib.Path('.'))
new = pathlib.Path('12345.json')
self.assertTrue(new.exists())
def tearDown(self):
pathlib.Path('details12345678.json').unlink()
pathlib.Path('1234.json').unlink()
try:
pathlib.Path('12345.json').unlink()
except FileNotFoundError:
pass
# class TestGetProMatches(unittest.TestCase):
# # def test_fetch_new_match_ids(match_ids_path):
# # # mocked
class TestParsers(unittest.TestCase):
# the tabbing and newlining is important
def setUp(self):
self.ex_hero = """"npc_dota_hero_antimage"
{
// General
//-------------------------------------------------------------------------------------------------------------
"HeroID" "1" // unique ID number for this hero. Do not change this once established or it will invalidate collected stats.
"Ability4" "antimage_mana_void" // Ability 4
"ItemSlots"
{
"0"
{
"SlotIndex" "0"
"SlotName" "weapon"
"SlotText" "#LoadoutSlot_Weapon"
"TextureWidth" "128"
"TextureHeight" "256"
"MaxPolygonsLOD0" "400"
"MaxPolygonsLOD1" "350"
}
"Bot"
{
"HeroType" "DOTA_BOT_HARD_CARRY"
"LaningInfo"
{
"SoloDesire" "1"
}
}
}
"""
self.ex_item = """\t"item_blink"
{
// General
//-------------------------------------------------------------------------------------------------------------
"ID" "1" // unique ID number for this item. Do not change this once established or it will invalidate collected stats.
"AbilityName" "item_blink"
"AbilitySpecial"
{
"01"
{
"var_type" "FIELD_INTEGER"
"blink_range" "1200"
}
}
}"""
self.ex_ability = """\t"antimage_blink"
{
// General
//-------------------------------------------------------------------------------------------------------------
"ID" "5004" // unique ID number for this ability. Do not change this once established or it will invalidate collected stats.
"AbilityName" "antimage_blink"
{
"01"
{
"var_type" "FIELD_INTEGER"
"blink_range" "1000 1075 1150 1150"
}
}
}"""
def test_parse_hero(self):
f = StringIO(self.ex_hero)
line = f.readline()
result = p.get_ability_block(f, line)
expected = [('name', 'npc_dota_hero_antimage'),
('HeroID', '1'),
('Ability4', 'antimage_mana_void'),
('SlotIndex', '0'),
('SlotName', 'weapon'),
('SlotText', '#LoadoutSlot_Weapon'),
('TextureWidth', '128'),
('TextureHeight', '256'),
('MaxPolygonsLOD0', '400'),
('MaxPolygonsLOD1', '350'),
('HeroType', 'DOTA_BOT_HARD_CARRY'),
('SoloDesire', '1')]
self.assertEqual(result, expected)
f.close()
def test_parse_item(self):
f = StringIO(self.ex_item)
line = f.readline()
result = p.get_item_block(f, line)
expected = [('name', 'blink'),
('ID', '1'),
('AbilityName', 'item_blink'),
('var_type', 'FIELD_INTEGER'),
('blink_range', '1200')]
self.assertEqual(result, expected)
f.close()
def test_parse_ability(self):
f = StringIO(self.ex_ability)
line = f.readline()
result = p.get_ability_block(f, line)
expected = [('name', 'antimage_blink'),
('ID', '5004'),
('AbilityName', 'antimage_blink'),
('var_type', 'FIELD_INTEGER'),
('blink_range', '1000 1075 1150 1150')]
self.assertEqual(result, expected)
f.close()
class TestHDF5(unittest.TestCase):
def setUp(self):
self.dr = DetailsResponse.from_json('details_response.json')
def test_add_by_side(self):
# just a smoke test
_data = {'duration': {0: 2857, 5: 2857},
'game_mod': {0: 1, 5: 1},
'team': {0: 1, 5: 0},
'item_2': {0: 168, 5: 79},
'hero_healing': {0: 0, 5: 1721},
'kills': {0: 4, 5: 7},
'dire_team_id': {0: None, 5: None},
'hero_damage': {0: 6141, 5: 9766},
'match_id': {0: 547519680, 5: 547519680},
'deaths': {0: 10, 5: 4},
'assists': {0: 5, 5: 14},
'radiant_team_id': {0: None, 5: None},
'gold_spent': {0: 12185, 5: 11910},
'tower_status': {0: nan, 5: nan},
'level': {0: 16, 5: 20},
'start_time': {0: Timestamp('2014-03-04 03:43:14'),
5: Timestamp('2014-03-04 03:43:14')},
'hero': {0: 62, 5: 102},
'player_slot': {0: 128, 5: 2},
'win': {0: False, 5: True},
'item_5': {0: 46, 5: 46},
'item_4': {0: 67, 5: 61},
'account_id': {0: 82787032.0, 5: nan},
'item_1': {0: 50, 5: 90},
'item_0': {0: 36, 5: 180},
'item_3': {0: 185, 5: 0},
'last_hits': {0: 56, 5: 67},
'denies': {0: 1, 5: 7},
'barracks_status': {0: nan, 5: nan},
'gold': {0: 16, 5: 3527}}
expected = pd.DataFrame(_data).sort_index(axis=1)
result = h5.format_df(self.dr).sort_index(axis=1).loc[[0, 5]]
tm.assert_frame_equal(result, expected)
| mit |
HengfengLi/algorithms-impl | 01.graham_scan/graham_scan.py | 1 | 5192 | # Re-write the C code from Computational Geometry in C - Section 3.5.
# max # of points
PMAX = 1000
P_origin = None
class Coord:
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "x:%.2f,y:%.2f" % (self.x, self.y)
def __repr__(self):
return self.__str__()
class tsPoint:
def __init__(self, id, coord):
self.id = id
self.coord = coord
self.flag_deleted = False
def __str__(self):
return "vnum:%d,coord:%s,deleted:%s" \
% (self.id, self.coord, self.flag_deleted)
def __repr__(self):
return self.__str__()
class tStackCell:
def __init__(self, p):
self.p = p
self.next = None
class tStack:
def __init__(self):
self.top = None
self.num = 0
def pop(self):
if self.num == 0: return None
temp = self.top
self.top = self.top.next
self.num -= 1
return temp.p
def push(self, p):
c = tStackCell(p)
c.next = self.top
self.top = c
self.num += 1
def values(self):
t = self.top
while t:
yield t.p
t = t.next
def __str__(self):
t = self.top
output = ""
while t:
output += "vnum=%d\tx=%.2f\ty=%.2f\n" \
% (t.p.id, t.p.coord.x, t.p.coord.y)
t = t.next
return output
def __repr__(self):
return self.__str__()
def area2(a, b, c):
return (b.x-a.x) * (c.y-a.y) - (c.x-a.x)*(b.y-a.y)
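# Worked example (added for clarity): area2 returns twice the signed area of
# the triangle (a, b, c); it is positive exactly when c lies to the left of
# the directed line a->b. For a=(0,0), b=(1,0), c=(0,1):
# (1-0)*(1-0) - (0-0)*(0-0) = 1 > 0, i.e. a->b->c is a counter-clockwise turn.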
def compare(pi, pj):
a = area2(P_origin.coord, pi.coord, pj.coord)
if a > 0: return -1
if a < 0: return 1
# collinear with P[0]
x = abs(pi.coord.x - P_origin.coord.x) - abs(pj.coord.x - P_origin.coord.x)
y = abs(pi.coord.y - P_origin.coord.y) - abs(pj.coord.y - P_origin.coord.y)
if x < 0 or y < 0:
pi.flag_deleted = True
return -1
elif x > 0 or y > 0:
pj.flag_deleted = True
return 1
else:
# points are coincident
if pi.id > pj.id:
pj.flag_deleted = True
else:
pi.flag_deleted = True
return 0
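# Note on the comparator above: it returns -1 when pi precedes pj in
# counter-clockwise (increasing polar angle) order around P_origin; for points
# collinear with P_origin, the one closer to the origin is flagged as deleted,
# since only the farthest collinear point can be a hull vertex.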
def swap(P, i, j):
temp = P[i]
P[i] = P[j]
P[j] = temp
def find_lowest(P):
# get a copy
P = P[:]
# index of lowest
m = 0
for i in range(1,len(P)):
		if P[i].coord.y < P[m].coord.y \
			or (P[i].coord.y == P[m].coord.y and P[i].coord.x > P[m].coord.x):
m = i
swap(P, 0, m)
return P
def graham(P):
    # initialize stack
stack = tStack()
stack.push(P[0])
stack.push(P[1])
assert(stack.num == 2)
# bottom two elements will never be removed
i = 2
while i < len(P):
# print "i", i
p1 = stack.top.next.p
p2 = stack.top.p
# print area2(p1.coord, p2.coord, P[i].coord)
if area2(p1.coord, p2.coord, P[i].coord) > 0:
stack.push(P[i])
i += 1
else:
# print "stack.pop"
stack.pop()
return stack
def graham_scan(coords):
global P_origin
# construct the list of points
P = []
for i in range(len(coords)):
x, y = coords[i]
P.append(tsPoint(i, Coord(x, y)))
# actual # of points
n = len(P)
# find the right-most of lowest point
P = find_lowest(P)
print "lowest:", P[0]
P_origin = P[0]
# sort other points angularly
new_P = [P[0]] + sorted(P[1:], cmp=compare)
# squash
	new_P = filter(lambda p: not p.flag_deleted, new_P)
# print new_P
stack = graham(new_P)
return [(p.coord.x, p.coord.y) for p in stack.values()]
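# Quick usage sketch (illustration only; note the module is Python 2 because of
# the print statements and ``sorted(..., cmp=compare)``):
#
#     hull = graham_scan([(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)])
#     # (1, 1) is strictly inside the square, so it should not be among the
#     # returned hull vertices.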
if __name__ == '__main__':
import matplotlib.pyplot as plt
coords = [(3,-2), (5,1), (7,4), (6,5), (4,2), (3,3), (3,5), (2,5), (0,5), \
(0,1), (-3,4), (-2,2), (0,0), (-3,2), (-5,2), (-5,1), (-5,-1), \
(1,-2), (-3,-2)]
assert(len(coords) == 19)
convex_hull_points = graham_scan(coords)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.grid(True, which='both')
# set the x-spine (see below for more info on `set_position`)
ax.spines['left'].set_position('zero')
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
# set the y-spine
ax.spines['bottom'].set_position('zero')
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.set_xlim([-6,8])
ax.set_ylim([-3,6])
x1 = [p[0] for p in coords]
y1 = [p[1] for p in coords]
# "clip_on=False, zorder=100" makes the points are above axes and girds
ax.scatter(x1, y1, c='red', clip_on=False, zorder=100)
x2 = [p[0] for p in convex_hull_points + [convex_hull_points[0]]]
y2 = [p[1] for p in convex_hull_points + [convex_hull_points[0]]]
ax.plot(x2, y2, marker='o', clip_on=False, zorder=100)
plt.show() | mit |
heli522/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0]`` is the product of the per-axis point counts
        (at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution, use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
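# Hypothetical shape check (illustration only, not part of the module): with
# two columns that each have more than ``grid_resolution`` distinct values,
# the returned grid is the cartesian product of the two axes.
#
#     rng = np.random.RandomState(0)
#     grid, axes = _grid_from_X(rng.rand(200, 2), grid_resolution=10)
#     # grid.shape == (100, 2) and len(axes) == 2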
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
cinai/identification-algorithms | algoritmo_3/Post Presentation/borrador.py | 1 | 8741 | import sys
sys.path.append("..")
import numpy as np
import pandas as pd
from geopy.distance import vincenty
from scipy import stats
import scipy.integrate as integrate
from feature_tools import *
from scipy.cluster.hierarchy import linkage, fcluster
# Auxiliar Functions
def split_sequence_by_weekdays(df_sequence):
weekday = df_sequence.query('weekday != 5 and weekday != 6')
weekend = df_sequence.query('weekday == 5 or weekday == 6')
return [weekday.reset_index(drop=True), weekend.reset_index(drop=True)]
# Function that searches for a location in the mls list and returns its index
# buscar_locacion: [string] string -> int
def buscar_locacion(mls,location):
try:
index_location = mls.index(location)
except ValueError:
index_location = -1
return index_location
### The big function
# TODO: check if tiempo_subida has NaN values, then check timediff afterwards
def get_features(df_sequence):
# variables auxiliares
par_subida = ""; par_bajada = ""
last_stop = ""; last_lat = "";last_long = ""; last_trip_stop = "";
last_trip = -1; last_week_day = -1;
card_type = -1
	n_days = -1 # -1 so the first day maps to array index 0
traveled_days_bs = 0
traveled_days_in_between = 0
shortest_activities = []; longest_activities = []
locations = []; location_set = []; origin_location_set = []
last_origins = []; first_origins = []; last_origin = None
start_times = []; end_times = []; an_end_time = None
pi_locations = []
n_trips_per_day = []
n_trips = 0
rm = np.array([0,0])
distance = 0; max_distance = 0; min_distance = 10.0**8; a_trip_distance = 0
step_counter = 0
exclusive_bus_days = 0.0
bus_exclusive = False
exclusive_metro_days = 0.0
metro_exclusive = False
bus_counter = 0.0
	# iterate over the stages (rows)
# TODO: check if it can be somehow faster than this
for index,stage in df_sequence.iterrows():
# Card Type Feature
if card_type == -1 and pd.notnull(stage.adulto):
card_type = stage.adulto
weekday = stage.tiempo_subida.weekday()
if weekday != last_week_day :
if last_week_day != -1:
# last origin feature
if last_origin:
last_origins.append(last_origin)
last_origin = None
if an_end_time:
end_times.append(int(auxiliar_functions.hour_to_seconds(an_end_time)))
an_end_time = None
			# what happens if there is no time???
start_times.append(int(auxiliar_functions.hour_to_seconds(stage.tiempo_subida)))
par_subida = stage.par_subida
par_bajada = stage.par_bajada
		# If this is a new trip, check whether new indicators need updating
#Max Distance and Min Distance Feature
if stage.netapa == 1:
if a_trip_distance > max_distance:
max_distance = a_trip_distance
if a_trip_distance < min_distance and a_trip_distance != 0:
min_distance = a_trip_distance
a_trip_distance = 0
last_trip_stop = ""
last_origin = par_subida
an_end_time = stage.tiempo_subida
		# If par_subida exists, it can be set as the origin stop
		# Sometimes the stop exists but the latitude does not
if par_subida == par_subida:
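			# (self-comparison is a NaN check: ``x == x`` is False only when x
			# is NaN, so rows with a missing boarding stop are skipped here)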
if stage.weekday != last_week_day:
# first origin feature
first_origins.append(par_subida)
location_index = buscar_locacion(location_set,stage.par_subida)
if location_index > -1:
pi_locations[location_index] += 1
else:
if stage.netapa == 1:
origin_location_set.append(par_subida)
location_set.append(stage.par_subida)
pi_locations.append(1)
# radius of gyration
if stage.lat_subida == stage.lat_subida and stage.long_subida == stage.long_subida:
locations.append(np.array([stage.lat_subida,stage.long_subida]))
rm = rm + np.array([stage.lat_subida,stage.long_subida])
#others
origin_stop = par_subida
origin_lat = stage.lat_subida
origin_long = stage.long_subida
			# total distance and per-trip (partial) distance
if last_stop != "":
distance += vincenty((last_lat,last_long),(origin_lat,origin_long)).meters
if last_trip_stop != "":
a_trip_distance += vincenty((last_lat,last_long),(origin_lat,origin_long)).meters
if par_bajada != par_bajada or stage.lat_bajada != stage.lat_bajada or stage.long_bajada != stage.long_bajada:
last_stop = origin_stop
last_lat = origin_lat
last_long = origin_long
last_trip_stop = origin_stop
else :
destinatination_stop = par_bajada
destination_lat = stage.lat_bajada
destination_long = stage.long_bajada
distance += vincenty((origin_lat,origin_long),(destination_lat,destination_long)).meters
a_trip_distance += vincenty((origin_lat,origin_long),(destination_lat,destination_long)).meters
last_stop = destinatination_stop
last_trip_stop = destinatination_stop
last_lat = destination_lat
last_long = destination_long
#mean shortest activity
if weekday != last_week_day :
if last_week_day != -1:
diff = abs(weekday - last_week_day)%6
if diff > 1:
traveled_days_bs = traveled_days_in_between
traveled_days_in_between = 0
traveled_days_in_between += 1
n_trips_per_day.append(n_trips)
while(diff > 1):
n_trips_per_day.append(0)
diff -= 1
if bus_exclusive:
exclusive_bus_days += 1
if metro_exclusive:
exclusive_metro_days += 1
shortest_activities.append(np.nan)
longest_activities.append(np.nan)
bus_exclusive = True
metro_exclusive = True
last_trip = -1
n_days += 1
n_trips = 0
last_week_day = weekday
if stage.tipo_transporte == "METRO":
bus_exclusive = False
if stage.tipo_transporte != "METRO":
metro_exclusive = False
bus_counter += 1
if stage.netapa == 1 :
n_trips += 1
last_trip = stage.nviaje
if stage.nviaje != 1:
#msal: mean shortest activity length
#mlal: mean longest activity length
time_diff = auxiliar_functions.td_to_minutes(stage.diferencia_tiempo)
				# the time difference between the last trip and the new trip
				# TODO: Fix - overestimated since it includes the duration of the last trip
if shortest_activities[n_days] != shortest_activities[n_days]:
shortest_activities[n_days] = time_diff
longest_activities[n_days] = time_diff
elif shortest_activities[n_days] > time_diff :
shortest_activities[n_days] = time_diff
if longest_activities[n_days] < time_diff:
longest_activities[n_days] = time_diff
step_counter += 1
	traveled_days = n_days + 1 # n_days starts at 0
	# capture data about the last trip
if a_trip_distance > max_distance:
max_distance = a_trip_distance
if a_trip_distance < min_distance and a_trip_distance != 0:
min_distance = a_trip_distance
if last_origin:
last_origins.append(last_origin)
if bus_exclusive:
exclusive_bus_days += 1
if metro_exclusive:
exclusive_metro_days += 1
p100_exclusive_bus_days = exclusive_bus_days/traveled_days*100
p100_exclusive_metro_days = exclusive_metro_days/traveled_days*100
P100_bus_trips = bus_counter/step_counter*100
# percentage different last origins
last_origin_set = set(last_origins)
if len(last_origins) > 0:
p100_diff_last_origin = len(last_origin_set)*1.0/len(last_origins)*100
else:
p100_diff_last_origin = 0.0
first_origin_set = set(first_origins)
if len(first_origins) > 0:
p100_diff_first_origin = len(first_origin_set)*1.0/len(first_origins)*100
else:
p100_diff_first_origin = 0.0
# start times
start_time = int(np.mean(start_times))
if an_end_time:
end_times.append(int(auxiliar_functions.hour_to_seconds(an_end_time)))
end_time = int(np.mean(end_times))
traveled_days_bs = traveled_days_in_between if traveled_days_bs == 0 else traveled_days_bs
	# compute the probability of being at each location
pi_suma = sum(pi_locations)
pi_locations[:] = [x*1.0/pi_suma for x in pi_locations]
unc_entropy = 0.0
for pi in pi_locations:
unc_entropy = unc_entropy + pi*np.log2(pi)
unc_entropy = -unc_entropy
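	# i.e. the Shannon entropy of the visit distribution:
	# unc_entropy = -sum_i p_i * log2(p_i), measured in bits.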
if len(origin_location_set) > 0:
random_entropy = np.log2(len(origin_location_set))
else:
random_entropy = 0.0
	# compute rg (radius of gyration)
rm = rm/len(locations)
rg_suma = 0.0
	# O(n) over all recorded locations :c
for r in locations:
rg_distance = vincenty((r[0],r[1]),(rm[0],rm[1])).meters
rg_suma = rg_suma + rg_distance**2
if len(locations) > 0 :
rg = (rg_suma/len(locations))**0.5
else:
rg = 0.0
msal = np.nanmean(shortest_activities)
mlal = np.nanmean(longest_activities)
kmDistance = distance/1000
kmMaxDist = max_distance/1000
kmMinDist = min_distance/1000
card_type = int(card_type)
	n_trips_per_day.append(n_trips) # is this returned???
frequence_regularity = stats.mode(n_trips_per_day).count[0]
return [msal,mlal,kmDistance,kmMaxDist,kmMinDist,rg,unc_entropy, \
random_entropy,p100_diff_last_origin,p100_diff_first_origin,card_type,\
start_time,end_time,traveled_days,traveled_days_bs,frequence_regularity,\
p100_exclusive_bus_days,p100_exclusive_metro_days,P100_bus_trips] | mit |
aldian/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 6 | 10430 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
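  # For example, with skip_window=1 the deque always holds
  # [left context, target, right context]; buffer[skip_window] is the center
  # word used as the skip-gram input below.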
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
xyguo/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 74 | 8472 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_consistent_transform():
# X_fit_ needs to retain the old, unmodified copy of X
state = np.random.RandomState(0)
X = state.rand(10, 10)
kpca = KernelPCA(random_state=state).fit(X)
transformed1 = kpca.transform(X)
X_copy = X.copy()
X[:, 0] = 666
transformed2 = kpca.transform(X_copy)
assert_array_almost_equal(transformed1, transformed2)
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
rwhitt2049/nimble | tests/test_events.py | 1 | 13507 | import numpy as np
import numpy.testing as npt
import pandas as pd
from unittest import TestCase, main
from nimble import Events
class EvTestCase(TestCase):
@staticmethod
def assertStartStops(events, vstarts, vstops):
npt.assert_array_equal(events._starts, vstarts)
npt.assert_array_equal(events._stops, vstops)
class TestDebouncing(EvTestCase):
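    """Assumed semantics, inferred from the expected starts/stops below: events
    active for less than `adeb` seconds are dropped (activation debounce) and
    inactive gaps shorter than `ddeb` seconds are bridged into the surrounding
    event (deactivation debounce); both are expressed in the same units as
    `period`."""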
def setUp(self):
condarr = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1])
self.cond = condarr > 0
def test_adeb(self):
vstarts = np.array([2, 7])
vstops = np.array([4, 10])
events = Events(self.cond, period=1, adeb=2).find()
self.assertStartStops(events, vstarts, vstops)
def test_ddeb(self):
vstarts = np.array([2, 7])
vstops = np.array([4, 12])
events = Events(self.cond, period=1, ddeb=2).find()
self.assertStartStops(events, vstarts, vstops)
def test_adeb_ddeb(self):
vstarts = np.array([2])
vstops = np.array([12])
events = Events(self.cond, period=1, adeb=2, ddeb=3.1).find()
self.assertStartStops(events, vstarts, vstops)
def test_nonint_deb(self):
vstarts = np.array([2, 7, 11])
vstops = np.array([4, 10, 12])
events = Events(self.cond, period=1, adeb=float(0.00000001),
ddeb=float(0.99999999)).find()
self.assertStartStops(events, vstarts, vstops)
def test_period_100ms(self):
vstarts = np.array([2, 7])
vstops = np.array([4, 12])
events = Events(self.cond, period=0.1, adeb=0.15, ddeb=0.2).find()
self.assertStartStops(events, vstarts, vstops)
def test_period_120ms(self):
vstarts = np.array([2, 7])
vstops = np.array([4, 12])
events = Events(self.cond, period=0.12, adeb=0.15, ddeb=0.2).find()
self.assertStartStops(events, vstarts, vstops)
def test_no_events_found(self):
vstarts = np.array([])
vstops = np.array([])
x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
events = Events(x > 0, period=1, adeb=0.15, ddeb=0.2).find()
self.assertStartStops(events, vstarts, vstops)
def test_event_always_active(self):
vstarts = np.array([0])
vstops = np.array([8])
x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
events = Events(x == 0, period=1, adeb=0.15, ddeb=0.2).find()
self.assertStartStops(events, vstarts, vstops)
def test_end_conditions(self):
vstarts = np.array([0, 6])
vstops = np.array([2, 8])
x = np.array([1, 1, 0, 0, 0, 0, 1, 1])
events = Events(x == 1, period=1, adeb=2, ddeb=2).find()
self.assertStartStops(events, vstarts, vstops)
class TestDurationFilter(EvTestCase):
def setUp(self):
condarr = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1])
self.cond = condarr > 0
def test_mindur(self):
vstarts = np.array([2, 7])
vstops = np.array([4, 10])
events = Events(self.cond, period=1, mindur=2).find()
self.assertStartStops(events, vstarts, vstops)
def test_maxdur(self):
vstarts = np.array([2, 11])
vstops = np.array([4, 12])
events = Events(self.cond, period=1, maxdur=2).find()
self.assertStartStops(events, vstarts, vstops)
def test_mindur_maxdur(self):
vstarts = np.array([2])
vstops = np.array([4])
events = Events(self.cond, period=1, mindur=2, maxdur=2.5).find()
self.assertStartStops(events, vstarts, vstops)
def test_nonint_durs(self):
vstarts = np.array([2])
vstops = np.array([4])
events = Events(self.cond, period=1, mindur=float(1.00000001),
maxdur=float(2.99999999)).find()
self.assertStartStops(events, vstarts, vstops)
def test_period_100ms(self):
vstarts = np.array([2])
vstops = np.array([4])
events = Events(self.cond, period=0.1, mindur=0.15, maxdur=0.2).find()
self.assertStartStops(events, vstarts, vstops)
def test_period_120ms(self):
vstarts = np.array([2])
vstops = np.array([4])
events = Events(self.cond, period=0.12, mindur=0.15, maxdur=0.35).find()
self.assertStartStops(events, vstarts, vstops)
def test_no_events_found(self):
vstarts = np.array([])
vstops = np.array([])
x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
events = Events(x > 0, period=1, mindur=0.15, maxdur=0.2).find()
self.assertStartStops(events, vstarts, vstops)
def test_event_always_active(self):
vstarts = np.array([0])
vstops = np.array([8])
x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
events = Events(x == 0, period=1, mindur=0.15, maxdur=20).find()
self.assertStartStops(events, vstarts, vstops)
def test_end_conditions(self):
vstarts = np.array([0, 6])
vstops = np.array([2, 8])
x = np.array([1, 1, 0, 0, 0, 0, 1, 1])
events = Events(x == 1, period=1, mindur=2, maxdur=2).find()
self.assertStartStops(events, vstarts, vstops)
class TestEventOffset(EvTestCase):
def setUp(self):
condarr = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1])
self.cond = condarr > 0
def test_startoffset(self):
vstarts = np.array([1, 6, 10])
vstops = np.array([4, 10, 12])
events = Events(self.cond, period=1, startoffset=-1).find()
self.assertStartStops(events, vstarts, vstops)
def test_stopoffset(self):
vstarts = np.array([2, 7, 11])
vstops = np.array([5, 11, 12])
events = Events(self.cond, period=1, stopoffset=1).find()
self.assertStartStops(events, vstarts, vstops)
def test_startoffset_stopoffset(self):
vstarts = np.array([1, 6, 10])
vstops = np.array([5, 11, 12])
events = Events(self.cond, period=1, startoffset=-1, stopoffset=1).find()
self.assertStartStops(events, vstarts, vstops)
def test_period_100ms(self):
vstarts = np.array([1, 6, 10])
vstops = np.array([5, 11, 12])
events = Events(self.cond, period=0.1, startoffset=-0.1, stopoffset=0.1).find()
self.assertStartStops(events, vstarts, vstops)
def test_period_120ms(self):
vstarts = np.array([1, 6, 10])
vstops = np.array([5, 11, 12])
events = Events(self.cond, period=0.12, startoffset=-0.1, stopoffset=0.1).find()
self.assertStartStops(events, vstarts, vstops)
def test_no_events_found(self):
vstarts = np.array([])
vstops = np.array([])
x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
events = Events(x > 0, period=1, startoffset=-1, stopoffset=1).find()
self.assertStartStops(events, vstarts, vstops)
def test_event_always_active(self):
vstarts = np.array([0])
vstops = np.array([8])
x = np.array([0, 0, 0, 0, 0, 0, 0, 0])
events = Events(x == 0, period=1, startoffset=-1, stopoffset=1).find()
self.assertStartStops(events, vstarts, vstops)
def test_end_conditions(self):
vstarts = np.array([0, 5])
vstops = np.array([3, 8])
x = np.array([1, 1, 0, 0, 0, 0, 1, 1])
events = Events(x == 1, period=1, startoffset=-1, stopoffset=1).find()
self.assertStartStops(events, vstarts, vstops)
class TestAsArrayMethod(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition, period=1).find()
def test_default_parameters(self):
"""Test as_array() with default settings"""
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
def test_as_array_false_value(self):
"""Test as_array() with low value"""
validation_array = np.array([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array(
false_values=-1))
def test_as_array_true_value(self):
"""Test as_array() with high value"""
validation_array = np.array([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])
npt.assert_array_equal(validation_array, self.events.as_array(
true_values=5))
def test_as_array_false_and_true_value(self):
"""Test as_array() with low and high values"""
validation_array = np.array([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])
npt.assert_array_equal(validation_array, self.events.as_array(
false_values=-1,
true_values=5))
def test_type(self):
typ = type(self.events.as_array(false_values=-1, true_values=5))
self.assertEqual(typ, np.ndarray)
class TestAsSeries(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition, period=1).find()
def test_default_parameters(self):
"""Test as_array() with default settings"""
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_series, self.events.as_series())
def test_as_array_false_value(self):
"""Test as_array() with low value"""
validation_series = np.array([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])
npt.assert_array_equal(validation_series, self.events.as_series(
false_values=-1))
def test_as_array_true_value(self):
"""Test as_array() with high value"""
validation_series = np.array([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])
npt.assert_array_equal(validation_series, self.events.as_series(
true_values=5))
def test_as_array_false_and_true_value(self):
"""Test as_array() with low and high values"""
validation_series = np.array([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])
npt.assert_array_equal(validation_series, self.events.as_series(
false_values=-1,
true_values=5))
def test_type(self):
typ = type(self.events.as_series(false_values=-1, true_values=5))
self.assertEqual(typ, pd.core.series.Series)
class TestDurations(TestCase):
def setUp(self):
condition_array = np.array([1, 0, 1, 1, 1, 1, 0, 0, 1, 1,
0, 0, 0, 1, 0, 0, 0, 1, 0, 0])
condition = (condition_array > 0)
self.events = Events(condition, period=1/3,
adeb=0.5, ddeb=1).find()
def test_durations(self):
# validation_array = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
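# The debounced signal above has 8 active samples (indices 2-9), so at a
# period of 1/3 s the single event lasts 8 * (1/3) = 8/3 seconds.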
validation_durations = [(8 / 3)]
npt.assert_array_equal(validation_durations, self.events.durations)
class TestEventDetection(TestCase):
def test_default_parameters(self):
"""Test event detection with only a supplied condition"""
np.random.seed(10)
validation_array = np.random.random_integers(0, 1, 100)
condition = (validation_array > 0)
events = Events(condition, period=1).find()
npt.assert_array_equal(validation_array, events.as_array())
def test_multi_input_condition_event(self):
"""Test arrays that have multi-input conditions"""
x = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0])
y = np.array([0, 0, 1, 1, 1, 0, 0, 1, 0, 1])
validation_array = np.array([0, 0, 1, 1, 0, 0, 0, 1, 0, 0])
condition = ((x > 0) & (y > 0))
events = Events(condition, period=1).find()
npt.assert_array_equal(validation_array, events.as_array())
class TestSpecialMethods(TestCase):
def setUp(self):
condition_array = np.array([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])
self.condition = (condition_array > 0)
self.events = Events(self.condition, period=1).find()
def test__len__(self):
self.assertEquals(4, len(self.events))
def test__eq__(self):
other = Events(self.condition, period=1).find()
self.assertEqual(self.events, other)
class TestAttributes(TestCase):
def setUp(self):
condition_array = np.array([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])
self.condition = (condition_array > 0)
def test_period(self):
self.assertRaises(ValueError, Events, self.condition, period=0)
def test_startoffset(self):
self.assertRaises(ValueError, Events, self.condition,
period=1, startoffset=1)
def test_stopoffset(self):
self.assertRaises(ValueError, Events, self.condition, period=0, stopoffset=-1)
class TestProperties(TestCase):
def setUp(self):
self.events = Events(np.array([False, False]), period=0.12,
adeb=1, ddeb=1,
mindur=1, maxdur=1,
startoffset=-1, stopoffset=1)
def test_adeb(self):
self.assertEqual(self.events._adeb, 9)
def test_ddeb(self):
self.assertEqual(self.events._ddeb, 9)
def test_mindur(self):
self.assertEqual(self.events._mindur, 9)
def test_maxdur(self):
self.assertEqual(self.events._maxdur, 8)
def test_startoffset(self):
self.assertEqual(self.events._startoffset, -9)
def test_stopoffset(self):
self.assertEqual(self.events._stopoffset, 9)
if __name__ == '__main__':
main()
| mit |
toenuff/treadmill | tests/reports_test.py | 1 | 5741 | """Unit test for treadmill.scheduler
"""
import datetime
import time
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
import pandas as pd
from treadmill import scheduler
from treadmill import reports
def _construct_cell():
"""Constructs a test cell."""
cell = scheduler.Cell('top')
rack1 = scheduler.Bucket('rack:rack1', traits=0, level='rack')
rack2 = scheduler.Bucket('rack:rack2', traits=0, level='rack')
cell.add_node(rack1)
cell.add_node(rack2)
srv1 = scheduler.Server('srv1', [10, 20, 30], traits=3,
valid_until=1000)
srv2 = scheduler.Server('srv2', [10, 20, 30], traits=7,
valid_until=2000)
srv3 = scheduler.Server('srv3', [10, 20, 30], traits=0,
valid_until=3000)
srv4 = scheduler.Server('srv4', [10, 20, 30], traits=0,
valid_until=4000)
rack1.add_node(srv1)
rack1.add_node(srv2)
rack2.add_node(srv3)
rack2.add_node(srv4)
tenant1 = scheduler.Allocation()
tenant2 = scheduler.Allocation()
tenant3 = scheduler.Allocation()
alloc1 = scheduler.Allocation([10, 10, 10], rank=100, traits=0)
alloc2 = scheduler.Allocation([10, 10, 10], rank=100, traits=3)
cell.partitions[None].allocation.add_sub_alloc('t1', tenant1)
cell.partitions[None].allocation.add_sub_alloc('t2', tenant2)
tenant1.add_sub_alloc('t3', tenant3)
tenant2.add_sub_alloc('a1', alloc1)
tenant3.add_sub_alloc('a2', alloc2)
return cell
class ReportsTest(unittest.TestCase):
"""treadmill.reports tests."""
def setUp(self):
scheduler.DIMENSION_COUNT = 3
self.cell = _construct_cell()
super(ReportsTest, self).setUp()
def test_servers(self):
"""Tests servers report."""
df = reports.servers(self.cell)
# print df
# Sample data frame to see that the values are correct.
self.assertEquals(df.ix['srv1']['memory'], 10)
self.assertEquals(df.ix['srv2']['rack'], 'rack:rack1')
# check valid until
# XXX(boysson): There is a timezone bug here.
# XXX(boysson): self.assertEquals(str(df.ix['srv1']['valid_until']),
# XXX(boysson): '1969-12-31 19:16:40')
# XXX(boysson): self.assertEquals(str(df.ix['srv4']['valid_until']),
# XXX(boysson): '1969-12-31 20:06:40')
def test_allocations(self):
"""Tests allocations report."""
df = reports.allocations(self.cell)
# print df
# cpu disk max_utilization memory rank
# name
# t2/a1 10 10 inf 10 100
# t1/t3/a2 10 10 inf 10 100
self.assertEquals(df.ix['-', 't2/a1']['cpu'], 10)
self.assertEquals(df.ix['-', 't1/t3/a2']['cpu'], 10)
# TODO: not implemented.
# df_traits = reports.allocation_traits(self.cell)
@mock.patch('time.time', mock.Mock(return_value=100))
def test_applications(self):
"""Tests application queue report."""
app1 = scheduler.Application('foo.xxx#1', 100,
demand=[1, 1, 1],
affinity='foo.xxx')
app2 = scheduler.Application('foo.xxx#2', 100,
demand=[1, 1, 1],
affinity='foo.xxx')
app3 = scheduler.Application('bla.xxx#3', 50,
demand=[1, 1, 1],
affinity='bla.xxx')
(self.cell.partitions[None].allocation
.get_sub_alloc('t1')
.get_sub_alloc('t3')
.get_sub_alloc('a2').add(app1))
(self.cell.partitions[None].allocation
.get_sub_alloc('t1')
.get_sub_alloc('t3')
.get_sub_alloc('a2').add(app2))
(self.cell.partitions[None].allocation
.get_sub_alloc('t2')
.get_sub_alloc('a1').add(app3))
self.cell.schedule()
apps_df = reports.apps(self.cell)
# print apps_df
# affinity allocation cpu data_retention_timeout disk memory \
# instance
# foo.xxx#1 foo.xxx t1/t3/a2 1 0 1 1
# foo.xxx#2 foo.xxx t1/t3/a2 1 0 1 1
# bla.xxx#3 bla.xxx t2/a1 1 0 1 1
#
# order pending rank server util
# instance
# foo.xxx#1 1.458152e+15 0 99 srv1 -0.135714
# foo.xxx#2 1.458152e+15 0 99 srv1 -0.128571
# bla.xxx#3 1.458152e+15 0 100 srv1 -0.121429
time.time.return_value = 100
self.assertEquals(apps_df.ix['foo.xxx#2']['cpu'], 1)
util0 = reports.utilization(None, apps_df)
time.time.return_value = 101
util1 = reports.utilization(util0, apps_df)
# print util1
# name bla.xxx foo.xxx \
# count util disk cpu memory count util disk
# 1969-12-31 19:00:00 1 -0.121429 1 1 1 2 -0.128571 2
# 1969-12-31 19:00:01 1 -0.121429 1 1 1 2 -0.128571 2
#
# name
# cpu memory
# 1969-12-31 19:00:00 2 2
# 1969-12-31 19:00:01 2 2
time0 = pd.Timestamp(datetime.datetime.fromtimestamp(100))
time1 = pd.Timestamp(datetime.datetime.fromtimestamp(101))
self.assertEquals(util1.ix[time0]['bla.xxx']['cpu'], 1)
self.assertEquals(util1.ix[time1]['foo.xxx']['count'], 2)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
manashmndl/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 127 | 25365 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
castelao/AutoQC | util/dbutils.py | 1 | 7274 | import io, math
import numpy
import pandas
import sqlite3
import util.main as main
def unpack_qc(value):
'unpack a qc result from the db'
try:
qc = numpy.load(io.BytesIO(value), allow_pickle=True)
except:
print('failed to unpack qc data - check db for missing entries.')
qc = numpy.zeros(1, dtype=bool)
return qc
def summarize(levels):
'given an array of level qc decisions, return true iff any of the levels are flagged'
return numpy.any(levels)
def parse(results):
'parse the raw pickled text of a per-level qc result, and return True if any levels are flagged'
return results.apply(unpack_qc).apply(summarize)
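# Illustrative sketch of the byte format unpack_qc expects: per-level QC flags
# are stored as the raw bytes written by numpy.save, so a round trip looks like
# the hypothetical helper below (nothing in this module calls it).
def _demo_pack_and_summarize():
    buf = io.BytesIO()
    numpy.save(buf, numpy.array([False, False, True]))
    levels = unpack_qc(buf.getvalue())  # -> array([False, False,  True])
    return summarize(levels)            # -> True, at least one level is flagged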
def summarize_truth(levels):
'given an array of originator qc decisions, return true iff any of the levels are flagged, ignoring t=99 levels'
return numpy.sum( [not levels.mask[i] and (x==3 or x==4) for i, x in enumerate(levels)]) >= 1
def parse_truth(results):
return results.apply(unpack_qc).apply(summarize_truth)
def get_n_levels_before_fail(results):
nlevels = []
for result in results:
n = 0
for qc in unpack_qc(result):
if qc == False:
n += 1
else:
break
nlevels.append(n)
return nlevels
def get_reversed_n_levels_before_fail(results):
nlevels = []
for result in results:
n = 0
for qc in unpack_qc(result)[::-1]:
if qc == False:
n -= 1
else:
break
nlevels.append(n)
return nlevels
def check_for_fail(results):
fails = []
for result in results:
answer = False
for qc in unpack_qc(result):
if qc == True:
answer = True
break
fails.append(answer)
return fails
def unpack_qc_results(results):
return [unpack_qc(result) for result in results]
def db_to_df(table,
filter_on_wire_break_test=False,
filter_on_tests={},
n_to_extract=numpy.iinfo(numpy.int32).max,
applyparse=True,
targetdb='iquod.db'):
'''
Reads the table from targetdb into a pandas dataframe.
If filter_on_wire_break_test is True, the results of the CSIRO_wire_break test are used to exclude
levels below a wire break from the other test results, and the wire break test itself is not returned.
filter_on_tests is a generalised form of filter_on_wire_break_test used to exclude results; it maps an
action ('Remove above reject', 'Remove below reject', 'Remove profile' or 'Remove rejected levels') to the
test names it applies to, and levels or whole profiles failing those tests are dropped accordingly;
entries keyed 'Optional' or 'At least one from group' are ignored.
Set n_to_extract to limit the number of rows extracted to the specified number.
'''
# what tests are available
testNames = main.importQC('qctests')
testNames.sort()
# connect to database
conn = sqlite3.connect(targetdb, isolation_level=None)
cur = conn.cursor()
# extract matrix of test results and true flags into a dataframe
query = 'SELECT uid, truth'
for test in testNames:
query += ', ' + test.lower()
query += ' FROM ' + table
query += ' WHERE uid IN (SELECT uid FROM ' + table + ' ORDER BY RANDOM() LIMIT ' + str(n_to_extract) + ')'
cur.execute(query)
rawresults = cur.fetchall()
sub = 1000
df_final = None
for i in range(math.ceil(len(rawresults)/sub)):
df = pandas.DataFrame(rawresults[i*sub:(i+1)*sub]).astype('bytes')
df.columns = ['uid', 'Truth'] + testNames
df = df.astype({'uid': 'int'})
if filter_on_wire_break_test:
nlevels = get_n_levels_before_fail(df['CSIRO_wire_break'])
del df['CSIRO_wire_break'] # No use for this now.
testNames = df.columns[2:].values.tolist()
for i in range(len(df.index)):
for j in range(1, len(df.columns)):
qc = unpack_qc(df.iloc[i, j])
# Some QC tests may return only one value so check for this.
if len(qc) > 1:
qc = qc[:nlevels[i]]
df.iat[i, j] = main.pack_array(qc)
todrop = set()
for action in filter_on_tests:
# Check if the action is relevant.
if action == 'Optional' or action == 'At least one from group': continue
# Initialise variables.
nlevels = -1
outcomes = False
qcresults = []
for testname in filter_on_tests[action]:
for i in range(0, len(df.index)):
if action == 'Remove above reject':
nlevels = get_reversed_n_levels_before_fail([df[testname][i]])[0]
elif action == 'Remove below reject':
nlevels = get_n_levels_before_fail([df[testname][i]])[0]
elif action == 'Remove profile':
outcomes = check_for_fail([df[testname][i]])[0]
elif action == 'Remove rejected levels':
qcresults = unpack_qc_results([df[testname][i]])[0]
else:
raise NameError('Unrecognised action: ' + action)
if (((action == 'Remove above reject' or action == 'Remove below reject') and nlevels == 0) or
(action == 'Remove profile' and outcomes == True) or
(action == 'Remove rejected levels' and numpy.count_nonzero(qcresults == False) == 0)):
# Completely remove a profile if it has no valid levels or if it
# has a fail and the action is to remove.
todrop.add(i)
elif (action != 'Remove profile'):
for j in range(1, len(df.columns)):
# Retain only the levels that passed testname.
# Some QC tests may return only one value so check for this.
qc = unpack_qc(df.iloc[i, j])
if len(qc) > 1:
if action == 'Remove above reject':
qc = qc[nlevels:]
elif action == 'Remove below reject':
qc = qc[:nlevels]
elif action == 'Remove rejected levels':
qc = qc[qcresults == False]
df.iat[i, j] = main.pack_array(qc)
del df[testname] # No need to keep this any longer.
df.reset_index(inplace=True, drop=True)
todrop = list(todrop)
if len(todrop) > 0:
df.drop(todrop, inplace=True)
df.reset_index(inplace=True, drop=True)
testNames = df.columns[2:].values.tolist()
if applyparse:
df[['Truth']] = df[['Truth']].apply(parse_truth)
df[testNames] = df[testNames].apply(parse)
if i == 0:
df_final = df
else:
df_final = pandas.concat([df_final, df])
return df_final.reset_index(drop=True)
| mit |
imaculate/scikit-learn | sklearn/utils/__init__.py | 17 | 12898 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as _ConvergenceWarning
from ..exceptions import DataConversionWarning
@deprecated("ConvergenceWarning has been moved into the sklearn.exceptions "
"module. It will not be available here from version 0.19")
class ConvergenceWarning(_ConvergenceWarning):
pass
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
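Examples
--------
A minimal sketch of the sparse path, where the boolean mask is converted
to row indices:

>>> from scipy.sparse import csr_matrix
>>> from sklearn.utils import safe_mask
>>> X_sparse = csr_matrix(np.arange(6).reshape(3, 2))
>>> safe_mask(X_sparse, np.array([True, False, True]))
array([0, 2])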
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def axis0_safe_slice(X, mask, len_mask):
"""
This mask is safer than safe_mask since it returns an
empty array, when a sparse matrix is sliced with a boolean mask
with all False, instead of raising an unhelpful error in older
versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
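Examples
--------
A minimal sketch with a plain list and an integer index array:

>>> from sklearn.utils import safe_indexing
>>> safe_indexing([10, 20, 30, 40], [0, 2])
[10, 30]
>>> safe_indexing(np.array([10, 20, 30, 40]), np.array([0, 2]))
array([10, 30])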
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d"
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
| bsd-3-clause |
berkeley-stat159/project-iota | code/utils/linear_modeling/block_linear_modeling_script.py | 1 | 8205 | from __future__ import division
import numpy as np
import pandas as pd
import numpy.linalg as npl
import matplotlib.pyplot as plt
import matplotlib.colors
import nibabel as nib
from scipy.stats import t as t_dist
from nilearn import image
from nilearn.plotting import plot_stat_map
import linear_modeling
from sys import argv
"""
Set the working directory to project-iota and run the command below in a terminal:
python block_linear_modeling_script.py task001_run001
"""
f1 = argv[1] # task001_run001
# Load the image as an image object
img = nib.load('../../../data/sub001/BOLD/' + f1 + '/filtered_func_data_mni.nii.gz')
# Load the pre-written convolved time course
start = np.loadtxt('../../../data/convo/' + f1 + '_conv001.txt')[4:]
end = np.loadtxt('../../../data/convo/' + f1 + '_conv004.txt')[4:]
convo = np.loadtxt('../../../data/convo/' + f1 + '_conv005.txt')[4:]
# Load the image data as an array and drop the first 4 3D volumes from the array
data = img.get_data()[..., 4:]
# Building design X matrix
design = np.ones((len(convo), 4))
design[:, 1] = start
design[:, 2] = end
design[:, 3] = convo
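# A small sanity check on the assembled design matrix (added here as a sketch,
# not part of the original script): an intercept column plus the three
# convolved task regressors.
assert design.shape == (len(convo), 4)
assert np.allclose(design[:, 0], 1)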
# Show the design matrix graphically
plt.figure(figsize=(8,6))
plt.imshow(design, aspect=0.05, cmap='gray', interpolation = 'nearest')
plt.savefig('../../../data/design_matrix/block_design_mat.png')
# Get the volume shape and the number of time points
vol_shape, n_time = data.shape[:-1], data.shape[-1]
# Mean volume over time (used below to build a mask and smooth the data)
mean_data = np.mean(data, -1)
plt.figure(0)
plt.hist(np.ravel(mean_data), bins=100)
line = plt.axvline(8000, ls='--', color = 'red')
plt.xlabel('Voxels')
plt.ylabel('Frequency')
plt.title('Mean Volume Over Time')
plt.savefig('../../../data/design_matrix/block_mean_data.png')
mask = mean_data > 8000
smooth_data = linear_modeling.smoothing(data, mask)
# Fit OLS to obtain residuals (used later for the White test)
residual = linear_modeling.OLS(design, smooth_data)
# Block generalized linear regression
betas_hat, errors, s2, df = linear_modeling.beta_est(smooth_data, design) #(4, 194287)
np.savetxt('../../../data/beta/' + f1 + '_betas_hat_block.txt', betas_hat.T, newline='\r\n')
print('The mean MRSS across all voxels using block study conditions is ' + str(np.mean(s2)))
# Filling back to raw data shape
beta_vols = np.zeros(vol_shape + (betas_hat.shape[0],)) #(91, 109, 91, 4)
beta_vols[mask] = betas_hat.T
# set regions outside mask as missing with np.nan
mean_data[~mask] = np.nan
beta_vols[~mask] = np.nan
# T-test on the null hypothesis, using the contrast [0, 1, 1, 1] over the three task regressors
t_value, p_value = linear_modeling.t_stat(design, [0,1,1,1], betas_hat, s2, df) #(1, 194287) (1, 194287)
np.savetxt('../../../data/beta/' + f1 + '_p-value_block.txt', p_value, newline='\r\n')
np.savetxt('../../../data/beta/' + f1 + '_T-value_block.txt', t_value, newline='\r\n')
# Filling T, P value back to raw data shape (91, 109, 91)
t_vols = np.zeros(vol_shape + (t_value.shape[0],))
t_vols[mask, :] = t_value.T
p_vols = np.ones(vol_shape + (p_value.shape[0],))
p_vols[mask, :] = p_value.T
# Hypothesis test on heteroskedasticity
stat_table = linear_modeling.white_test(residual, design) # 3868
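# (The threshold 0.05/133 below appears to be a Bonferroni-type correction;
#  133 is assumed here to be the number of comparisons used by the authors.)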
print("Total are", len(stat_table), "Voxels, but there are only :", sum(stat_table < (0.05/133)), "voxels whose variance of errors keep constant")
np.savetxt('../../../data/GLS/' + f1 + '_white_test_block.txt', stat_table, newline='\r\n')
#========================================================================================================
# Loading color value for cmap
cmap_values = np.loadtxt('../../../data/color_map.txt')
nice_cmap = matplotlib.colors.ListedColormap(cmap_values, 'color_map')
# Visualizing beta_hat for the middle slice in gray
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[:, i, :,], cmap='gray', alpha=0.5)
io = ax.imshow(beta_vols[:, i, :, 3], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.suptitle("Middle Level for beta_hat", fontsize=20)
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_beta_middle_map.png")
# Visualizing p values for the middle slice in gray
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[:, i, :,], cmap='gray', alpha=0.5)
io = ax.imshow(p_vols[:, i, :, 0], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.suptitle("Middle Level for P-value", fontsize=20)
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_p_middle_map.png")
# Visualizing t values for the middle slice
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[:, i, :,], cmap='gray', alpha=0.5)
io = ax.imshow(t_vols[:, i, :, 0], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_t_middle_map.png")
# Visualizing beta_hat for the front slice in gray
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[i, :, :,], cmap='gray', alpha=0.5)
io = ax.imshow(beta_vols[i, :, :, 3], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.suptitle("Front Level for beta_hat", fontsize=20)
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_beta_front_map.png")
# Visualizing p values for the front slice in gray
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[i, :, :,], cmap='gray', alpha=0.5)
io = ax.imshow(p_vols[i, :, :, 0], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.suptitle("Front Level for P-value", fontsize=20)
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_p_front_map.png")
# Visualizing t values for the front slice
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[i, :, :,], cmap='gray', alpha=0.5)
io = ax.imshow(t_vols[i, :, :, 0], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_t_front_map.png")
# Visualizing beta_hat for the back slice in gray
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[:, :, i,], cmap='gray', alpha=0.5)
io = ax.imshow(beta_vols[:, :, i, 3], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.suptitle("Back Level for beta_hat", fontsize=20)
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_beta_back_map.png")
# Visualizing p values for the back slice in gray
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[:, :, i,], cmap='gray', alpha=0.5)
io = ax.imshow(p_vols[:, :, i, 0], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.suptitle("Back Level for P-value", fontsize=20)
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_p_back_map.png")
# Visualizing t values for the back slice
fig, axes = plt.subplots(nrows=4, ncols=4)
for i, ax in zip(range(25,58,2), axes.flat):
im = ax.imshow(mean_data[:, :, i,], cmap='gray', alpha=0.5)
io = ax.imshow(t_vols[:, :, i, 0], cmap=nice_cmap, alpha=0.5)
fig.subplots_adjust(right=0.85)
cax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(io, cax=cax)
plt.savefig("../../../data/maps/block_t_back_map.png")
# generate p-map
linear_modeling.p_map(1, 1, p_vols, 0.05)
plt.savefig("../../../data/maps/block_p_map_0_05.png")
linear_modeling.p_map(1, 1, p_vols, 0.05/133)
plt.savefig("../../../data/maps/block_p_map.png")
# P-value of auxiliary regression
plt.figure()
plt.plot(range(stat_table.shape[0]), stat_table)
plt.xlabel('Voxel')
plt.ylabel('P-value of auxiliary regression')
line = plt.axhline(0.01, ls='--', color = 'red')
plt.title('Hypothesis test on Heteroscedasticity')
plt.savefig("../../../data/GLS/block_p_auxi.png")
| bsd-3-clause |
ky822/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
mjudsp/Tsallis | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
orbingol/NURBS-Python | docs/conf.py | 1 | 6225 | # -*- coding: utf-8 -*-
#
# NURBS-Python documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 10 21:16:25 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
import geomdl
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive']
# Inheritance diagram configuration
inheritance_graph_attrs = dict(rankdir="LR", ratio='compress')
# List of modules to be mocked
# autodoc_mock_imports = ['enum', 'numpy', 'matplotlib', 'mpl_toolkits', 'plotly', 'vtk']
autodoc_mock_imports = ['vtk']
# Set MPL backend for visualization
os.environ['MPLBACKEND'] = "Agg"
# # Order of functions in the documentation; default is 'alphabetical'
# autodoc_member_order = 'bysource'
autosummary_generate = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NURBS-Python'
copyright = u'2016-2019, Onur Rauf Bingol'
author = geomdl.__author__
description = geomdl.__description__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = geomdl.__version__
# The full version, including alpha/beta/rc tags.
release = geomdl.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_copy_source = False
html_show_sourcelink = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NURBS-Python_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NURBS-Python.tex', u'NURBS-Python Documentation',
u'Onur Rauf Bingol', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nurbs-python', u'NURBS-Python Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NURBS-Python', u'NURBS-Python Documentation',
author, 'NURBS-Python', description,
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| mit |
f3r/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 23 | 14318 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
# This tests that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that the copy keyword did not modify the original arrays;
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
# We test the scale=True parameter.
# This also allows checking numerical stability across platforms.
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
# Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
# This tests the robustness of the algorithm when dealing with values close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
| bsd-3-clause |
prisae/empymod | empymod/scripts/fdesign.py | 1 | 42420 | r"""
:mod:`empymod.scripts.fdesign` -- Digital Linear Filter (DLF) design
====================================================================
The add-on fdesign can be used to design digital linear filters for the Hankel
or Fourier transform, or for any linear transform ([Ghos70]_). For this
included or provided theoretical transform pairs can be used. Alternatively,
one can use the EM modeller empymod to use the responses to an arbitrary 1D
model as numerical transform pair.
More information can be found in the following places:
- The article about fdesign is in the repo
https://github.com/emsig/article-fdesign
- Example notebooks to design a filter can be found in the repo
https://empymod.readthedocs.io/en/stable/examples
This filter designing tool uses the direct matrix inversion method as described
in [Kong07]_ and is based on scripts by [Key12]_. The whole project of
`fdesign` started with the Matlab scripts from Kerry Key, which he used to
design his filters for [Key09]_, [Key12]_. Fruitful discussions with Evert Slob
and Kerry Key improved the add-on substantially.
Note that the use of empymod to create numerical transform pairs is, as of now,
only implemented for the Hankel transform.
Implemented analytical transform pairs
--------------------------------------
The following tables list the transform pairs which are implemented by default.
Any other transform pair can be provided as input. A transform pair is defined
in the following way:
.. code-block:: python
from empymod.scripts import fdesign
def my_tp_pair(var):
'''My transform pair.'''
def lhs(l):
return func(l, var)
def rhs(r):
return func(r, var)
return fdesign.Ghosh(name, lhs, rhs)
Here, `name` must be one of `j0`, `j1`, `sin`, or `cos`, depending on what type of
transform pair it is. Additional variables are provided with `var`. The
evaluation points of the `lhs` are denoted by `l`, and the evaluation points of
the `rhs` are denoted as `r`. As an example here the implemented transform pair
`j0_1`
.. code-block:: python
def j0_1(a=1):
'''Hankel transform pair J0_1 ([Ande75]_).'''
def lhs(l):
return l*np.exp(-a*l**2)
def rhs(r):
return np.exp(-r**2/(4*a))/(2*a)
return Ghosh('j0', lhs, rhs)
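A pair defined like this can then be passed to the :func:`design` routine, for
instance (a minimal sketch; the filter length and the spacing/shift values are
arbitrary choices):
.. code-block:: python
filt = fdesign.design(n=201, spacing=0.01, shift=-1.0,
fI=fdesign.j0_1(), verb=0, plot=0, save=False)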
Implemented Hankel transforms
-----------------------------
- `j0_1` [Ande75]_
.. math::
:label: j01
\int^\infty_0 l \exp\left(-al^2\right) J_0(lr) dl =
\frac{\exp\left(\frac{-r^2}{4a}\right)}{2a}
- `j0_2` [Ande75]_
.. math::
:label: j02
\int^\infty_0 \exp\left(-al\right) J_0(lr) dl =
\frac{1}{\sqrt{a^2+r^2}}
- `j0_3` [GuSi97]_
.. math::
:label: j03
\int^\infty_0 l\exp\left(-al\right) J_0(lr) dl =
\frac{a}{(a^2 + r^2)^{3/2}}
- `j0_4` [ChCo82]_
.. math::
:label: j04
\int^\infty_0 \frac{l}{\beta} \exp\left(-\beta z_v \right)
J_0(lr) dl =
\frac{\exp\left(-\gamma R\right)}{R}
- `j0_5` [ChCo82]_
.. math::
:label: j05
\int^\infty_0 l \exp\left(-\beta z_v \right)
J_0(lr) dl =
\frac{ z_v (\gamma R + 1)}{R^3}\exp\left(-\gamma R\right)
- `j1_1` [Ande75]_
.. math::
:label: j11
\int^\infty_0 l^2 \exp\left(-al^2\right) J_1(lr) dl =
\frac{r}{4a^2} \exp\left(-\frac{r^2}{4a}\right)
- `j1_2` [Ande75]_
.. math::
:label: j12
\int^\infty_0 \exp\left(-al\right) J_1(lr) dl =
\frac{\sqrt{a^2+r^2}-a}{r\sqrt{a^2 + r^2}}
- `j1_3` [Ande75]_
.. math::
:label: j13
\int^\infty_0 l \exp\left(-al\right) J_1(lr) dl =
\frac{r}{(a^2 + r^2)^{3/2}}
- `j1_4` [ChCo82]_
.. math::
:label: j14
\int^\infty_0 \frac{l^2}{\beta} \exp\left(-\beta z_v \right)
J_1(lr) dl =
\frac{r(\gamma R+1)}{R^3}\exp\left(-\gamma R\right)
- `j1_5` [ChCo82]_
.. math::
:label: j15
\int^\infty_0 l^2 \exp\left(-\beta z_v \right)
J_1(lr) dl =
\frac{r z_v(\gamma^2R^2+3\gamma R+3)}{R^5}\exp\left(-\gamma R\right)
Where
.. math:: a >0, r>0
.. math:: z_v = |z_{rec} - z_{src}|
.. math:: R = \sqrt{r^2 + z_v^2}
.. math:: \gamma = \sqrt{2j\pi\mu_0f/\rho}
.. math:: \beta = \sqrt{l^2 + \gamma^2}
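Each of these pairs can be verified numerically, e.g., `j0_2` with
`scipy.integrate.quad` (a rough sketch; the evaluation point and the finite
upper integration limit are arbitrary choices):
.. code-block:: python
import numpy as np
from scipy.special import j0
from scipy.integrate import quad
a, r = 1.0, 2.0
lhs, _ = quad(lambda l: np.exp(-a*l)*j0(l*r), 0, 30, limit=200)
rhs = 1/np.sqrt(a**2 + r**2)
# lhs and rhs should agree to several digits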
Implemented Fourier transforms
------------------------------
- `sin_1` [Ande75]_
.. math::
:label: sin1
\int^\infty_0 l\exp\left(-a^2l^2\right) \sin(lr) dl =
\frac{\sqrt{\pi}r}{4a^3} \exp\left(-\frac{r^2}{4a^2}\right)
- `sin_2` [Ande75]_
.. math::
:label: sin2
\int^\infty_0 \exp\left(-al\right) \sin(lr) dl =
\frac{r}{a^2 + r^2}
- `sin_3` [Ande75]_
.. math::
:label: sin3
\int^\infty_0 \frac{l}{a^2+l^2} \sin(lr) dl =
\frac{\pi}{2} \exp\left(-ar\right)
- `cos_1` [Ande75]_
.. math::
:label: cos1
\int^\infty_0 \exp\left(-a^2l^2\right) \cos(lr) dl =
\frac{\sqrt{\pi}}{2a} \exp\left(-\frac{r^2}{4a^2}\right)
- `cos_2` [Ande75]_
.. math::
:label: cos2
\int^\infty_0 \exp\left(-al\right) \cos(lr) dl =
\frac{a}{a^2 + r^2}
- `cos_3` [Ande75]_
.. math::
:label: cos3
\int^\infty_0 \frac{1}{a^2+l^2} \cos(lr) dl =
\frac{\pi}{2a} \exp\left(-ar\right)
"""
# Copyright 2016-2021 The empymod Developers.
#
# This file is part of empymod.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import numpy as np
from copy import deepcopy as dc
from scipy.constants import mu_0
from scipy.optimize import brute, fmin_powell
# Optional imports
try:
import matplotlib.pyplot as plt
except ImportError:
plt = False
plt_msg = "* WARNING :: `matplotlib` is not installed, no figures shown."
from empymod.filters import DigitalFilter
from empymod.model import dipole, dipole_k
from empymod.filters import key_201_2009 as j0j1filt
from empymod.filters import key_201_CosSin_2012 as sincosfilt
from empymod.utils import printstartfinish, timedelta, default_timer
__all__ = ['design', 'save_filter', 'load_filter', 'plot_result',
'print_result', 'Ghosh', 'j0_1', 'j0_2', 'j0_3', 'j0_4', 'j0_5',
'j1_1', 'j1_2', 'j1_3', 'j1_4', 'j1_5', 'sin_1', 'sin_2', 'sin_3',
'cos_1', 'cos_2', 'cos_3', 'empy_hankel']
# 1. PRINCIPAL FILTER DESIGNING ROUTINES
def design(n, spacing, shift, fI, fC=False, r=None, r_def=(1, 1, 2), reim=None,
cvar='amp', error=0.01, name=None, full_output=False, finish=False,
save=True, path='filters', verb=2, plot=1):
r"""Digital linear filter (DLF) design
This routine can be used to design digital linear filters for the Hankel or
Fourier transform, or for any linear transform ([Ghos70]_). For this
included or provided theoretical transform pairs can be used.
Alternatively, one can use the EM modeller empymod to use the responses to
an arbitrary 1D model as numerical transform pair.
This filter designing tool uses the direct matrix inversion method as
described in [Kong07]_ and is based on scripts by [Key12]_. The tool is an
add-on to the electromagnetic modeller empymod [Wert17]_. Fruitful
discussions with Evert Slob and Kerry Key improved the add-on
substantially.
Example notebooks of its usage can be found in the documentation-gallery,
https://empymod.readthedocs.io/en/stable/examples
Parameters
----------
n : int
Filter length.
spacing: float or tuple (start, stop, num)
Spacing between filter points. If tuple, it corresponds to the input
for np.linspace with endpoint=True.
shift: float or tuple (start, stop, num)
Shift of base from zero. If tuple, it corresponds to the input for
np.linspace with endpoint=True.
fI, fC : transform pairs
Theoretical or numerical transform pair(s) for the inversion (I) and
for the check of goodness (fC). fC is optional. If not provided, fI is
used for both fI and fC.
r : array, optional
Right-hand side evaluation points for the check of goodness (fC).
Defaults to r = np.logspace(0, 5, 1000), which are a lot of evaluation
points, and depending on the transform pair way too long r's.
r_def : tuple (add_left, add_right, factor), optional
Definition of the right-hand side evaluation points r of the inversion.
r is derived from the base values, default is (1, 1, 2).
- rmin = log10(1/max(base)) - add_left
- rmax = log10(1/min(base)) + add_right
- r = logspace(rmin, rmax, factor*n)
reim : np.real or np.imag, optional
Which part of complex transform pairs is used for the inversion.
Defaults to np.real.
cvar : string {'amp', 'r'}, optional
If 'amp', the inversion minimizes the amplitude. If 'r', the inversion
maximizes the right-hand side evaluation point r. Default is 'amp'.
error : float, optional
Up to which relative error the transformation is considered good in the
evaluation of the goodness. Default is 0.01 (1 %).
name : str, optional
Name of the filter. Defaults to dlf_+str(n).
full_output : bool, optional
If True, returns best filter and output from scipy.optimize.brute; else
only filter. Default is False.
finish : None, True, or callable, optional
If callable, it is passed through to scipy.optimize.brute: minimization
function used to polish the best result of the brute-force approach.
Default is None. You can simply provide True in order to use
scipy.optimize.fmin_powell(). Set this to None if you are only
interested in the actually provided spacing/shift-values.
save : bool, optional
If True, best filter is saved to plain text files in ./filters/. Can be
loaded with fdesign.load_filter(name). If full, the inversion output is
stored too. You can add '.gz' to `name`, which will then save the full
inversion output in a compressed file instead of plain text.
path : string, optional
Absolute or relative path where output will be saved if `save=True`.
Default is 'filters'.
verb : {0, 1, 2}, optional
Level of verbosity, default is 2:
- 0: Print nothing.
- 1: Print warnings.
- 2: Print additional time, progress, and result
plot : {0, 1, 2, 3}, optional
Level of plot-verbosity, default is 1:
- 0: Plot nothing.
- 1: Plot brute-force result
- 2: Plot additional theoretical transform pairs, and best inv.
- 3: Plot additional inversion result
(can result in lots of plots depending on spacing and shift)
If you are using a notebook, use %matplotlib notebook to have
all inversion results appear in the same plot.
Returns
-------
filter : empymod.filter.DigitalFilter instance
Best filter for the input parameters.
full : tuple
Output from scipy.optimize.brute with full_output=True. (Returned when
`full_output` is True.)
"""
# === 1. LET'S START ============
t0 = printstartfinish(verb)
# Check plot with matplotlib (soft dependency)
if plot > 0 and not plt:
plot = 0
if verb > 0:
print(plt_msg)
# Ensure fI, fC are lists
def check_f(f):
if hasattr(f, 'name'): # put into list if single tp
f = [f, ]
else: # ensure list (works for lists, tuples, arrays)
f = list(f)
return f
if not fC: # copy fI if fC not provided
fC = dc(fI)
fI = check_f(fI)
if fI[0].name == 'j2':
raise ValueError("j2 (jointly j0 and j1) is only implemented for "
"fC, not for fI!")
fC = check_f(fC)
# Check default input values
if finish and not callable(finish):
finish = fmin_powell
if name is None:
name = 'dlf_'+str(n)
if r is None:
r = np.logspace(0, 5, 1000)
if reim not in [np.real, np.imag]:
reim = np.real
# Get spacing and shift slices, cast r
ispacing = _ls2ar(spacing, 'spacing')
ishift = _ls2ar(shift, 'shift')
r = np.atleast_1d(r)
# Initialize log-dict to keep track in brute-force minimization-function.
log = {'cnt1': -1, # Counter
'cnt2': -1, # %-counter; v Total number of iterations v
'totnr': np.arange(*ispacing).size*np.arange(*ishift).size,
'time': t0, # Timer
'warn-r': 0} # Warning for short r
# === 2. THEORETICAL MODEL rhs ============
# Calculate rhs
for i, f in enumerate(fC):
fC[i].rhs = f.rhs(r)
# Plot
if plot > 1:
_call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim)
# === 3. RUN BRUTE FORCE OVER THE GRID ============
full = brute(_get_min_val, (ispacing, ishift), full_output=True,
args=(n, fI, fC, r, r_def, error, reim, cvar, verb, plot,
log), finish=finish)
# Add cvar-information to full: 0 for 'amp', 1 for 'r'
if cvar == 'r':
full += (1, )
else:
full += (0, )
# Finish output from brute/fmin; depending if finish or not
if verb > 1:
print("")
if callable(finish):
print("")
# Get best filter (full[0] contains spacing/shift of the best result).
dlf = _calculate_filter(n, full[0][0], full[0][1], fI, r_def, reim, name)
# If verbose, print result
if verb > 1:
print_result(dlf, full)
# === 4. FINISHED ============
printstartfinish(verb, t0)
# If plot, show result
if plot > 0:
print("* QC: Overview of brute-force inversion:")
plot_result(dlf, full, False)
if plot > 1:
print("* QC: Inversion result of best filter (minimum amplitude):")
_get_min_val(full[0], n, fI, fC, r, r_def, error, reim, cvar, 0,
plot+1, log)
# Save if desired
if save:
if full_output:
save_filter(name, dlf, full, path=path)
else:
save_filter(name, dlf, path=path)
# Output, depending on full_output
if full_output:
return dlf, full
else:
return dlf
def save_filter(name, filt, full=None, path='filters'):
r"""Save DLF-filter and inversion output to plain text files."""
# First we'll save the filter using its internal routine.
# This will create the directory ./filters if it doesn't exist already.
filt.tofile(path)
# If full, we store the inversion output
if full:
# Get file name
path = os.path.abspath(path)
if len(name.split('.')) == 2:
suffix = '.gz'
else:
suffix = ''
fullfile = os.path.join(path, name.split('.')[0]+'_full.txt' + suffix)
# Get number of spacing and shift values
nspace, nshift = full[3].shape
# Create header
header = 'Full inversion output from empymod.fdesign.design\n'
header += 'Line 11: Nr of spacing values\n'
header += 'Line 12: Nr of shift values\n'
header += 'Line 13: Best spacing value\n'
header += 'Line 14: Best shift value\n'
header += 'Line 15: Min amplitude or max offset\n'
header += f'Lines 16-{nspace+15}: Spacing matrix '
header += f'({nspace} x {nshift})\n'
header += f'Lines {nspace+16}-{2*nspace+15}: Shift matrix '
header += f'({nspace} x {nshift})\n'
header += f'Lines {2*nspace+16}-{3*nspace+15}: Minimum-field '
header += f'matrix ({nspace} x {nshift})\n'
header += f'Line {3*nspace+16}: Integer: 0: min amp, 1: max r'
# Create arrays; put single values in arrays of nshift values
nr_spacing = np.r_[nspace, np.zeros(nshift-1)]
nr_shift = np.r_[nshift, np.zeros(nshift-1)]
best_spacing = np.r_[full[0][0], np.zeros(nshift-1)]
best_shift = np.r_[full[0][1], np.zeros(nshift-1)]
min_value = np.r_[np.atleast_1d(full[1]), np.zeros(nshift-1)]
min_max = np.r_[full[4], np.zeros(nshift-1)]
# Collect all in one array
fullsave = np.vstack((nr_spacing, nr_shift, best_spacing, best_shift,
min_value, full[2][0], full[2][1], full[3],
min_max))
# Save array
np.savetxt(fullfile, fullsave, header=header)
def load_filter(name, full=False, path='filters', filter_coeff=None):
r"""Load saved DLF-filter and inversion output from text files."""
# First we'll get the filter using its internal routine.
if filter_coeff is None:
filt = DigitalFilter(name.split('.')[0])
else:
filt = DigitalFilter(name.split('.')[0], filter_coeff=filter_coeff)
filt.fromfile(path)
# If full, we get the inversion output
if full:
# Try to get the inversion result. If files are not found, most likely
# because they were not stored, we only return the filter
try:
# Get file name
path = os.path.abspath(path)
if len(name.split('.')) == 2:
suffix = '.gz'
else:
suffix = ''
fullfile = os.path.join(path, name.split('.')[0] +
'_full.txt' + suffix)
# Read data
out = np.loadtxt(fullfile)
except IOError:
return filt
# Collect inversion-result tuple
nspace = int(out[0][0])
nshift = int(out[1][0])
space_shift_matrix = np.zeros((2, nspace, nshift))
space_shift_matrix[0, :, :] = out[5:nspace+5, :]
space_shift_matrix[1, :, :] = out[nspace+5:2*nspace+5, :]
out = (np.array([out[2][0], out[3][0]]), out[4][0], space_shift_matrix,
out[2*nspace+5:3*nspace+5, :], int(out[3*nspace+5, 0]))
return filt, out
else:
return filt
# 2 PLOTTING ROUTINES (for QC or direct use)
# # 2.a Public plotting routines for QC or direct use
def plot_result(filt, full, prntres=True):
r"""QC the inversion result.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
- If prntres is True, it calls fdesign.print_result as well.
"""
# Check matplotlib (soft dependency)
if not plt:
print(plt_msg)
return
if prntres:
print_result(filt, full)
# Get spacing and shift values from full output of brute
spacing = full[2][0, :, 0]
shift = full[2][1, 0, :]
# Get minimum field values from full output of brute
minfield = np.squeeze(full[3])
plt.figure("Brute force result", figsize=(9.5, 4.5))
plt.subplots_adjust(wspace=.4, bottom=0.2)
# Figure 1: Only if more than 1 spacing or more than 1 shift
# Figure of minfield, depending if spacing/shift are vectors or floats
if spacing.size > 1 or shift.size > 1:
plt.subplot(121)
if full[4] == 0: # Min amp
plt.title("Minimal recovered fields")
ylabel = 'Minimal recovered amplitude (log10)'
field = np.log10(minfield)
cmap = plt.cm.viridis
else: # Max r
plt.title("Maximum recovered r")
ylabel = 'Maximum recovered r'
field = 1/minfield
cmap = plt.cm.viridis_r
if shift.size == 1: # (a) if only one shift value,
plt.plot(spacing, field)
plt.xlabel('Spacing')
plt.ylabel(ylabel)
elif spacing.size == 1: # (b) if only one spacing value
plt.plot(shift, field)
plt.xlabel('Shift')
plt.ylabel(ylabel)
else: # (c) if several spacing and several shift values
field = np.ma.masked_where(np.isinf(minfield), field)
plt.pcolormesh(shift, spacing, field, cmap=cmap, shading='nearest')
plt.xlim([shift[0]-np.diff(shift[:2])/2,
shift[-1]+np.diff(shift[-2:])/2])
plt.ylim([spacing[0]-np.diff(spacing[:2])/2,
spacing[-1]+np.diff(spacing[-2:])/2])
plt.ylabel('Spacing')
plt.xlabel('Shift')
plt.colorbar()
# Figure 2: Filter values
if spacing.size > 1 or shift.size > 1:
plt.subplot(122)
plt.title('Filter values of best filter')
# Check for old filters without `filt.filter_coeff`.
if hasattr(filt, 'filter_coeff'):
filter_coeff = filt.filter_coeff
else:
filter_coeff = ['j0', 'j1', 'sin', 'cos']
# Loop over filters.
for attr in filter_coeff:
if hasattr(filt, attr):
plt.plot(np.log10(filt.base),
np.log10(np.abs(getattr(filt, attr))), '.-', lw=.5,
label='abs('+attr+')')
plt.plot(np.log10(filt.base), np.log10(-getattr(filt, attr)), '.',
color='k', ms=4)
plt.plot(np.inf, 0, '.', color='k', ms=4, label='Neg. values')
plt.xlabel('Base (log10)')
plt.ylabel('Abs(Amplitude) (log10)')
plt.legend(loc='best')
plt.gcf().canvas.draw() # To force draw in notebook while running
plt.show()
def print_result(filt, full=None):
r"""Print best filter information.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
"""
print(f" Filter length : {filt.base.size}")
print(" Best filter")
if full: # If full provided, we have more information
if full[4] == 0: # Min amp
print(f" > Min field : {full[1]:g}")
else: # Max r
r = 1/full[1]
print(f" > Max r : {r:g}")
spacing = full[0][0]
shift = full[0][1]
else: # Print what we can without full
n = filt.base.size
a = filt.base[-1]
b = filt.base[-2]
spacing = np.log(a)-np.log(b)
shift = np.log(a)-spacing*(n//2)
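# (This assumes a log-equispaced base, base[i] = exp(shift + spacing*(i - n//2)),
# so spacing and shift can be recovered from the two largest base values.)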
print(f" > Spacing : {spacing:1.10g}")
print(f" > Shift : {shift:1.10g}")
print(f" > Base min/max : {filt.base.min():e} / {filt.base.max():e}")
# # 2.b Private plotting routines for QC
def _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim):
r"""QC the input transform pairs."""
print("* QC: Input transform-pairs:")
print(" fC: x-range defined through `n`, `spacing`, `shift`, and "
"`r`-parameters; b-range defined through `r`-parameter.")
print(" fI: x- and b-range defined through `n`, `spacing`, "
"`shift`, and `r_def`-parameters.")
# Calculate min/max k, from minimum and maximum spacing/shift
minspace = np.arange(*ispacing).min()
maxspace = np.arange(*ispacing).max()
minshift = np.arange(*ishift).min()
maxshift = np.arange(*ishift).max()
maxbase = np.exp(maxspace*(n//2) + maxshift)
minbase = np.exp(maxspace*(-n//2+1) + minshift)
# For fC-r (k defined with same amount of points as r)
kmax = maxbase/r.min()
kmin = minbase/r.max()
k = np.logspace(np.log10(kmin), np.log10(kmax) + minspace, r.size)
# For fI-r
rI = np.logspace(np.log10(1/maxbase) - r_def[0],
np.log10(1/minbase) + r_def[1], r_def[2]*n)
kmaxI = maxbase/rI.min()
kminI = minbase/rI.max()
kI = np.logspace(np.log10(kminI), np.log10(kmaxI) + minspace,
r_def[2]*n)
# Plot QC
fig, axs = plt.subplots(figsize=(9.5, 6), nrows=2, ncols=2,
num="Transform pairs")
axs = axs.ravel()
plt.subplots_adjust(wspace=.3, hspace=.4)
_plot_transform_pairs(fC, r, k, axs[:2], 'fC')
if reim == np.real:
tit = 'RE(fI)'
else:
tit = 'IM(fI)'
_plot_transform_pairs(fI, rI, kI, axs[2:], tit)
fig.canvas.draw() # To force draw in notebook while running
plt.show()
def _plot_transform_pairs(fCI, r, k, axes, tit):
r"""Plot the input transform pairs."""
# Plot lhs
plt.sca(axes[0])
plt.title('|' + tit + ' lhs|')
for f in fCI:
if f.name == 'j2':
lhs = f.lhs(k)
plt.loglog(k, np.abs(lhs[0]), lw=2, label='j0')
plt.loglog(k, np.abs(lhs[1]), lw=2, label='j1')
else:
plt.loglog(k, np.abs(f.lhs(k)), lw=2, label=f.name)
if tit != 'fC':
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.sca(axes[1])
plt.title('|' + tit + ' rhs|')
# Transform pair rhs
for f in fCI:
if tit == 'fC':
plt.loglog(r, np.abs(f.rhs), lw=2, label=f.name)
else:
plt.loglog(r, np.abs(f.rhs(r)), lw=2, label=f.name)
# Transform with Key in the case of Hankel or Fourier transform.
for f in fCI:
if f.name in ['j0', 'j1', 'j2', 'cos', 'sin']:
if f.name[1] in ['0', '1', '2'] and f.name[0] == 'j':
filt = j0j1filt()
else:
filt = sincosfilt()
kk = filt.base/r[:, None]
if f.name == 'j2':
lhs = f.lhs(kk)
kr0 = np.dot(lhs[0], getattr(filt, 'j0'))/r
kr1 = np.dot(lhs[1], getattr(filt, 'j1'))/r**2
kr = kr0+kr1
else:
kr = np.dot(f.lhs(kk), getattr(filt, f.name))/r
plt.loglog(r, np.abs(kr), '-.', lw=2, label=filt.name)
if tit != 'fC':
plt.xlabel('r')
plt.legend(loc='best')
def _plot_inversion(f, rhs, r, k, imin, spacing, shift, cvar):
r"""QC the resulting filter."""
# Check matplotlib (soft dependency)
if not plt:
print(plt_msg)
return
plt.figure("Inversion result "+f.name, figsize=(9.5, 4))
plt.subplots_adjust(wspace=.3, bottom=0.2)
plt.clf()
tk = np.logspace(np.log10(k.min()), np.log10(k.max()), r.size)
plt.suptitle(f.name+'; Spacing ::'+str(spacing)+'; Shift ::'+str(shift))
# Plot lhs
plt.subplot(121)
plt.title('|lhs|')
if f.name == 'j2':
lhs = f.lhs(tk)
plt.loglog(tk, np.abs(lhs[0]), lw=2, label='Theoretical J0')
plt.loglog(tk, np.abs(lhs[1]), lw=2, label='Theoretical J1')
else:
plt.loglog(tk, np.abs(f.lhs(tk)), lw=2, label='Theoretical')
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.subplot(122)
plt.title('|rhs|')
# Transform pair rhs
plt.loglog(r, np.abs(f.rhs), lw=2, label='Theoretical')
# Transform with filter
plt.loglog(r, np.abs(rhs), '-.', lw=2, label='This filter')
# Plot minimum amplitude or max r, respectively
if cvar == 'amp':
label = 'Min. Amp'
else:
label = 'Max. r'
plt.loglog(r[imin], np.abs(rhs[imin]), 'go', label=label)
plt.xlabel('r')
plt.legend(loc='best')
plt.gcf().canvas.draw() # To force draw in notebook while running
plt.show()
# 3. ANALYTICAL TRANSFORM PAIRS
class Ghosh:
r"""Simple Class for Theoretical Transform Pairs.
Named after D. P. Ghosh, honouring his 1970 Ph.D. thesis with which he
introduced the digital filter method to geophysics ([Ghos70]_).
"""
def __init__(self, name, lhs, rhs):
r"""Add the filter name, lhs, and rhs."""
self.name = name
self.lhs = lhs
self.rhs = rhs
# # 3.a Hankel J0 transform pairs
def j0_1(a=1):
r"""Hankel transform pair J0_1 ([Ande75]_)."""
def lhs(x):
return x*np.exp(-a*x**2)
def rhs(b):
return np.exp(-b**2/(4*a))/(2*a)
return Ghosh('j0', lhs, rhs)
def j0_2(a=1):
r"""Hankel transform pair J0_2 ([Ande75]_)."""
def lhs(x):
return np.exp(-a*x)
def rhs(b):
return 1/np.sqrt(b**2 + a**2)
return Ghosh('j0', lhs, rhs)
def j0_3(a=1):
r"""Hankel transform pair J0_3 ([GuSi97]_)."""
def lhs(x):
return x*np.exp(-a*x)
def rhs(b):
return a/(b**2 + a**2)**1.5
return Ghosh('j0', lhs, rhs)
def j0_4(f=1, rho=0.3, z=50):
r"""Hankel transform pair J0_4 ([ChCo82]_).
Parameters
----------
f : float
Frequency (Hz)
rho : float
Resistivity (Ohm.m)
z : float
Vertical distance between source and receiver (m)
"""
gam = np.sqrt(2j*np.pi*mu_0*f/rho)
def lhs(x):
beta = np.sqrt(x**2 + gam**2)
return x*np.exp(-beta*np.abs(z))/beta
def rhs(b):
R = np.sqrt(b**2 + z**2)
return np.exp(-gam*R)/R
return Ghosh('j0', lhs, rhs)
def j0_5(f=1, rho=0.3, z=50):
r"""Hankel transform pair J0_5 ([ChCo82]_).
Parameters
----------
f : float
Frequency (Hz)
rho : float
Resistivity (Ohm.m)
z : float
Vertical distance between source and receiver (m)
"""
gam = np.sqrt(2j*np.pi*mu_0*f/rho)
def lhs(x):
beta = np.sqrt(x**2 + gam**2)
return x*np.exp(-beta*np.abs(z))
def rhs(b):
R = np.sqrt(b**2 + z**2)
return np.abs(z)*(gam*R + 1)*np.exp(-gam*R)/R**3
return Ghosh('j0', lhs, rhs)
# # 3.b Hankel J1 transform pairs
def j1_1(a=1):
r"""Hankel transform pair J1_1 ([Ande75]_)."""
def lhs(x):
return x**2*np.exp(-a*x**2)
def rhs(b):
return b/(4*a**2)*np.exp(-b**2/(4*a))
return Ghosh('j1', lhs, rhs)
def j1_2(a=1):
r"""Hankel transform pair J1_2 ([Ande75]_)."""
def lhs(x):
return np.exp(-a*x)
def rhs(b):
return (np.sqrt(b**2 + a**2) - a)/(b*np.sqrt(b**2 + a**2))
return Ghosh('j1', lhs, rhs)
def j1_3(a=1):
r"""Hankel transform pair J1_3 ([Ande75]_)."""
def lhs(x):
return x*np.exp(-a*x)
def rhs(b):
return b/(b**2 + a**2)**1.5
return Ghosh('j1', lhs, rhs)
def j1_4(f=1, rho=0.3, z=50):
r"""Hankel transform pair J1_4 ([ChCo82]_).
Parameters
----------
f : float
Frequency (Hz)
rho : float
Resistivity (Ohm.m)
z : float
Vertical distance between source and receiver (m)
"""
gam = np.sqrt(2j*np.pi*mu_0*f/rho)
def lhs(x):
beta = np.sqrt(x**2 + gam**2)
return x**2*np.exp(-beta*np.abs(z))/beta
def rhs(b):
R = np.sqrt(b**2 + z**2)
return b*(gam*R + 1)*np.exp(-gam*R)/R**3
return Ghosh('j1', lhs, rhs)
def j1_5(f=1, rho=0.3, z=50):
r"""Hankel transform pair J1_5 ([ChCo82]_).
Parameters
----------
f : float
Frequency (Hz)
rho : float
Resistivity (Ohm.m)
z : float
Vertical distance between source and receiver (m)
"""
gam = np.sqrt(2j*np.pi*mu_0*f/rho)
def lhs(x):
beta = np.sqrt(x**2 + gam**2)
return x**2*np.exp(-beta*np.abs(z))
def rhs(b):
R = np.sqrt(b**2 + z**2)
return np.abs(z)*b*(gam**2*R**2 + 3*gam*R + 3)*np.exp(-gam*R)/R**5
return Ghosh('j1', lhs, rhs)
# # 3.c Fourier sine transform pairs
def sin_1(a=1, inverse=False):
r"""Fourier sine transform pair sin_1 ([Ande75]_)."""
def lhs(x):
return x*np.exp(-a**2*x**2)
def rhs(b):
return np.sqrt(np.pi)*b*np.exp(-b**2/(4*a**2))/(4*a**3)
if inverse:
return Ghosh('sin', rhs, lhs)
else:
return Ghosh('sin', lhs, rhs)
def sin_2(a=1, inverse=False):
r"""Fourier sine transform pair sin_2 ([Ande75]_)."""
def lhs(x):
return np.exp(-a*x)
def rhs(b):
return b/(b**2 + a**2)
if inverse:
return Ghosh('sin', rhs, lhs)
else:
return Ghosh('sin', lhs, rhs)
def sin_3(a=1, inverse=False):
r"""Fourier sine transform pair sin_3 ([Ande75]_)."""
def lhs(x):
return x/(a**2 + x**2)
def rhs(b):
return np.pi*np.exp(-a*b)/2
if inverse:
return Ghosh('sin', rhs, lhs)
else:
return Ghosh('sin', lhs, rhs)
# # 3.d Fourier cosine transform pairs
def cos_1(a=1, inverse=False):
r"""Fourier cosine transform pair cos_1 ([Ande75]_)."""
def lhs(x):
return np.exp(-a**2*x**2)
def rhs(b):
return np.sqrt(np.pi)*np.exp(-b**2/(4*a**2))/(2*a)
if inverse:
return Ghosh('cos', rhs, lhs)
else:
return Ghosh('cos', lhs, rhs)
def cos_2(a=1, inverse=False):
r"""Fourier cosine transform pair cos_2 ([Ande75]_)."""
def lhs(x):
return np.exp(-a*x)
def rhs(b):
return a/(b**2 + a**2)
if inverse:
return Ghosh('cos', rhs, lhs)
else:
return Ghosh('cos', lhs, rhs)
def cos_3(a=1, inverse=False):
r"""Fourier cosine transform pair cos_3 ([Ande75]_)."""
def lhs(x):
return 1/(a**2 + x**2)
def rhs(b):
return np.pi*np.exp(-a*b)/(2*a)
if inverse:
return Ghosh('cos', rhs, lhs)
else:
return Ghosh('cos', lhs, rhs)
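# Hedged sanity check (illustrative only; the helper name is made up and scipy
# is assumed to be available): the cosine pairs above are defined without a
# prefactor, so for `cos_2` the identity
# int_0^inf exp(-a*x)*cos(b*x) dx = a/(b**2 + a**2) can be verified numerically.
def _example_check_cos_2(a=1.0, b=0.5):
    from scipy.integrate import quad
    pair = cos_2(a)
    numeric, _ = quad(lambda x: pair.lhs(x)*np.cos(b*x), 0, np.inf)
    return numeric, pair.rhs(b)  # the two values should agree closely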
# # 3.e Modeller
def empy_hankel(ftype, zsrc, zrec, res, freqtime, depth=None, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None,
htarg=None, verblhs=0, verbrhs=0):
r"""Numerical transform pair with empymod.
All parameters except `ftype`, `verblhs`, and `verbrhs` correspond to the
input parameters to `empymod.dipole`. See there for more information.
Note that if depth=None or [], the analytical full-space solutions will be
used (much faster).
Parameters
----------
ftype : str or list of strings
Either of: {'j0', 'j1', 'j2', ['j0', 'j1']}
- `'j0'`: Analyze J0-term with ab=11, angle=45°
- `'j1'`: Analyze J1-term with ab=31, angle=0°
- `'j2'`: Analyze J0- and J1-terms jointly with ab=12, angle=45°
- `['j0', 'j1']` : Same as calling empy_hankel twice, once with 'j0'
        and once with 'j1'; can be provided like this to fdesign.design.

Note that ftype='j2' only works for fC, not for fI.
verblhs, verbrhs: int
verb-values provided to empymod for lhs and rhs.
"""
if htarg is None:
htarg = {}
# Loop over ftypes, if there are several
if isinstance(ftype, list):
out = []
for f in ftype:
out.append(empy_hankel(f, zsrc, zrec, res, freqtime, depth, aniso,
epermH, epermV, mpermH, mpermV, htarg,
verblhs, verbrhs))
return out
# Collect model
model = {'src': [0, 0, zsrc],
'depth': depth,
'res': res,
'aniso': aniso,
'epermH': epermH,
'epermV': epermV,
'mpermH': mpermH,
'mpermV': mpermV}
# Finalize model depending on ftype
if ftype == 'j0': # J0: 11, 45°
model['ab'] = 11
x = 1/np.sqrt(2)
y = 1/np.sqrt(2)
elif ftype == 'j1': # J1: 31, 0°
model['ab'] = 31
x = 1
y = 0
elif ftype == 'j2': # J2: 12, 45°
model['ab'] = 12
x = 1/np.sqrt(2)
y = 1/np.sqrt(2)
# rhs: empymod.model.dipole
# If depth=[], the analytical full-space solution will be used internally
def rhs(r):
out = dipole(rec=[r*x, r*y, zrec], ht='qwe', xdirect=True,
verb=verbrhs, htarg=htarg, freqtime=freqtime, **model)
return out
# lhs: empymod.model.dipole_k
def lhs(k):
lhs0, lhs1 = dipole_k(rec=[x, y, zrec], wavenumber=k, verb=verblhs,
freq=freqtime, **model)
if ftype == 'j0':
return lhs0
elif ftype == 'j1':
return lhs1
elif ftype == 'j2':
return (lhs0, lhs1)
return Ghosh(ftype, lhs, rhs)
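# Hedged usage sketch (not part of the original module; the parameter values
# are made up and empymod is assumed to be installed): create a numerical J0
# transform pair for a homogeneous full-space and evaluate its space-domain
# side at a few offsets.
def _example_empy_hankel():
    fC = empy_hankel('j0', zsrc=250, zrec=300, res=1.0, freqtime=1.0)
    r = np.logspace(1, 3, 5)   # offsets (m); depth=None => full-space solution
    return fC.rhs(r)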
# 4. NON-USER-FACING ROUTINES
def _get_min_val(spaceshift, *params):
r"""Calculate minimum resolved amplitude or maximum r."""
# Get parameters from tuples
spacing, shift = spaceshift
n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params
# Get filter for these parameters
dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')
# Calculate rhs-response with this filter
k = dlf.base/r[:, None]
# Loop over transforms
for i, f in enumerate(fC):
# Calculate lhs and rhs; rhs depends on ftype
lhs = f.lhs(k)
if f.name == 'j2':
rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
rhs = rhs0 + rhs1
else:
rhs = np.dot(lhs, getattr(dlf, f.name))/r
# Get relative error
rel_error = np.abs((rhs - f.rhs)/f.rhs)
# Get indices where relative error is bigger than error
imin0 = np.where(rel_error > error)[0]
# Find first occurrence of failure
if np.all(rhs == 0) or np.all(np.isnan(rhs)):
# if all rhs are zeros or nans, the filter is useless
imin0 = 0
elif imin0.size == 0:
# if imin0.size == 0: # empty array, all rel_error < error.
imin0 = rhs.size-1 # set to last r
if verb > 0 and log['warn-r'] == 0:
print(f"* WARNING :: all data have error < {error}; "
"choose larger r or set error-level higher.")
log['warn-r'] = 1 # Only do this once
else:
            # Kind of a dirty hack: permit jumping over up to four bad values,
            # which can result, for instance, from a high rel_error at zero
            # crossings of the transform pair. Should be made an input
            # argument or generally improved.
if imin0.size > 4:
imin0 = np.max([0, imin0[4]-5])
else: # just take the first one (no jumping allowed; normal case)
imin0 = np.max([0, imin0[0]-1])
            # Note that both versions yield the same result if the failure is
            # consistent.
# Depending on cvar, store minimum amplitude or 1/maxr
if cvar == 'amp':
min_val0 = np.abs(rhs[imin0])
else:
min_val0 = 1/r[imin0]
# Check if this inversion is better than previous ones
if i == 0: # First run, store these values
imin = dc(imin0)
min_val = dc(min_val0)
else: # Replace imin, min_val if this one is better
if min_val0 > min_val:
min_val = dc(min_val0)
imin = dc(imin0)
# QC plot
if plot > 2:
_plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)
# If verbose, print progress
if verb > 1:
log = _print_count(log)
# If there is no point with rel_error < error (imin=0) it returns np.inf.
return np.where(imin == 0, np.inf, min_val)
def _calculate_filter(n, spacing, shift, fI, r_def, reim, name):
r"""Calculate filter for this spacing, shift, n."""
# Base :: For this n/spacing/shift
base = np.exp(spacing*(np.arange(n)-n//2) + shift)
# r :: Start/end is defined by base AND r_def[0]/r_def[1]
# Overdetermined system if r_def[2] > 1
r = np.logspace(np.log10(1/np.max(base)) - r_def[0],
np.log10(1/np.min(base)) + r_def[1], int(r_def[2]*n))
# k :: Get required k-values (matrix of shape (r.size, base.size))
k = base/r[:, None]
# Create filter instance
dlf = DigitalFilter(name.split('.')[0])
dlf.base = base
dlf.factor = np.around(np.average(base[1:]/base[:-1]), 15)
dlf.filter_coeff = []
# Loop over transforms
for f in fI:
# Add current filter name.
dlf.filter_coeff.append(f.name)
# Calculate lhs and rhs for inversion
lhs = reim(f.lhs(k))
rhs = reim(f.rhs(r)*r)
# Calculate filter values: Solve lhs*J=rhs using linalg.qr.
# If factoring fails (qr) or if matrix is singular or square (solve) it
# will raise a LinAlgError. Error is ignored and zeros are returned
# instead.
try:
qq, rr = np.linalg.qr(lhs)
J = np.linalg.solve(rr, rhs.dot(qq))
except np.linalg.LinAlgError:
J = np.zeros((base.size,))
setattr(dlf, f.name, J)
return dlf
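# Hedged illustration (the helper name and parameter values are made up):
# design a tiny 11-point filter for the analytical pair `j0_2` defined above;
# `r_def=(1, 1, 2)` mimics an over-determination factor of two.
def _example_calculate_filter():
    dlf = _calculate_filter(n=11, spacing=0.1, shift=0.0, fI=[j0_2()],
                            r_def=(1, 1, 2), reim=np.real, name='sketch')
    return dlf.base, dlf.j0    # filter base and the fitted J0 coefficients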
def _ls2ar(inp, strinp):
r"""Convert float or linspace-input to arange/slice-input for brute."""
# Check if input is 1 or 3 elements (float, list, tuple, array)
if np.size(inp) == 1:
start = np.squeeze(inp)
stop = start+1
num = 1
elif np.size(inp) == 3:
start = inp[0]
stop = inp[1]
num = inp[2]
else:
raise ValueError(f"<{strinp}> must be a float or a tuple of 3 elements"
f" (start, stop, num); <{strinp} provided: {inp}")
# Re-arrange it to be compatible with np.arange/slice for brute
if num < 2 or start == stop:
stop = start
step = 1
else:
step = (stop-start)/(num-1)
return (start, stop+step/2, step)
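# Hedged illustration (the helper name is made up): `_ls2ar` turns either a
# single value or a (start, stop, num) triple into arguments that
# `np.arange`/`slice` (and hence `scipy.optimize.brute`) understand.
def _example_ls2ar():
    single = _ls2ar(0.05, 'spacing')          # one value -> degenerate range
    triple = _ls2ar((0.05, 0.2, 4), 'shift')  # 4 values from 0.05 to 0.2
    return np.arange(*single), np.arange(*triple)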
def _print_count(log):
r"""Print run-count information."""
log['cnt2'] += 1 # Current number
cp = log['cnt2']/log['totnr']*100 # Percentage
if log['cnt2'] == 0: # Not sure about this; brute seems to call the
pass # function with the first arguments twice...
elif log['cnt2'] > log['totnr']: # fmin-status
print(f" fmin fct calls : {log['cnt2']-log['totnr']}", end='\r')
elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
# Get seconds since start
sec = int(default_timer() - log['time'])
# Get estimate of remaining time, as string
tleft = str(timedelta(seconds=int(100*sec/cp - sec)))
# Print progress
pstr = f" brute fct calls : {log['cnt2']}/{log['totnr']}"
if log['totnr'] > 100:
pstr += f" ({int(cp)} %); est: {tleft} "
print(pstr, end='\r')
if log['cnt2'] == log['totnr']:
# Empty previous line
print(" "*len(pstr), end='\r')
# Print final brute-message
print(f" brute fct calls : {log['totnr']}")
# Update percentage cnt1
log['cnt1'] = cp
return log
| apache-2.0 |
fabioticconi/scikit-learn | sklearn/utils/random.py | 37 | 10511 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
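# Hedged usage sketch (not part of the public API; the helper name and values
# are made up): draw a single sparse column of integer class labels with a
# non-uniform distribution.
def _example_random_choice_csc():
    classes = [np.array([0, 1, 2])]
    class_probability = [np.array([0.5, 0.25, 0.25])]  # must sum to exactly 1
    return random_choice_csc(10, classes, class_probability, random_state=0)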
| bsd-3-clause |
oemof/feedinlib | src/feedinlib/open_FRED.py | 1 | 17002 | from itertools import chain
from itertools import groupby
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
import oedialect # noqa: F401
import open_FRED.cli as ofr
import pandas as pd
import sqlalchemy as sqla
from geoalchemy2.elements import WKTElement as WKTE
from geoalchemy2.shape import to_shape
from pandas import DataFrame as DF
from pandas import Series
from pandas import Timedelta as TD
from pandas import to_datetime as tdt
from shapely.geometry import Point
from sqlalchemy.orm import sessionmaker
from .dedup import deduplicate
#: The type of variable selectors. A selector should always contain the
#: name of the variable to select and optionally the height to select,
#: if only a specific one is desired.
Selector = Union[Tuple[str], Tuple[str, int]]
TRANSLATIONS: Dict[str, Dict[str, List[Selector]]] = {
"windpowerlib": {
"wind_speed": [("VABS_AV",)],
"temperature": [("T",)],
"roughness_length": [("Z0",)],
"pressure": [("P",)],
},
"pvlib": {
"wind_speed": [("VABS_AV", 10)],
"temp_air": [("T", 10)],
"pressure": [("P", 10)],
"dhi": [("ASWDIFD_S", 0)],
"ghi": [("ASWDIFD_S", 0), ("ASWDIR_S", 0)],
"dni": [("ASWDIRN_S", 0)],
},
}
def defaultdb():
engine = getattr(defaultdb, "engine", None) or sqla.create_engine(
"postgresql+oedialect://openenergy-platform.org"
)
defaultdb.engine = engine
session = (
getattr(defaultdb, "session", None) or sessionmaker(bind=engine)()
)
defaultdb.session = session
metadata = sqla.MetaData(schema="climate", bind=engine, reflect=False)
return {"session": session, "db": ofr.mapped_classes(metadata)}
class Weather:
"""
Load weather measurements from an openFRED conforming database.
Note that you need a database storing weather data using the openFRED
schema in order to use this class. There is one publicly available at
https://openenergy-platform.org
Now you can simply instantiate a `Weather` object via e.g.:
Examples
--------
>>> from shapely.geometry import Point
>>> point = Point(9.7311, 53.3899)
>>> weather = Weather(
... start="2007-04-05 06:00",
... stop="2007-04-05 07:31",
... locations=[point],
... heights=[10],
... variables="pvlib",
... **defaultdb()
... )
Instead of the special values `"pvlib"` and `"windpowerlib"` you can
also supply a list of variables, like e.g. `["P", "T", "Z0"]`, to
retrieve from the database.
After initialization, you can use e.g. `weather.df(point, "pvlib")`
to retrieve a `DataFrame` with weather data from the measurement
location closest to the given `point`.
Parameters
----------
start : Anything `pandas.to_datetime` can convert to a timestamp
Load weather data starting from this date.
stop : Anything `pandas.to_datetime` can convert to a timestamp
Don't load weather data before this date.
locations : list of :shapely:`Point`
Weather measurements are collected from measurement locations closest
        to the given points.
location_ids : list of int
Weather measurements are collected from measurement locations having
primary keys, i.e. IDs, in this list. Use this e.g. if you know you're
using the same location(s) for multiple queries and you don't want
the overhead of doing the same nearest point query multiple times.
heights : list of numeric
Limit selected timeseries to these heights. If `variables` contains a
variable which isn't height dependent, i.e. it has only one height,
namely `0`, the corresponding timeseries is always
        selected. Don't select the corresponding variable, in order to avoid
this.
Defaults to `None` which means no restriction on height levels.
variables : list of str or one of "pvlib" or "windpowerlib"
Load the weather variables specified in the given list, or the
variables necessary to calculate a feedin using `"pvlib"` or
`"windpowerlib"`.
Defaults to `None` which means no restriction on loaded variables.
regions : list of :shapely:`Polygon`
Weather measurements are collected from measurement locations
contained within the given polygons.
session : `sqlalchemy.orm.Session`
db : dict of mapped classes
"""
def __init__(
self,
start,
stop,
locations,
location_ids=None,
heights=None,
variables=None,
regions=None,
session=None,
db=None,
):
self.session = session
self.db = db
if self.session is None and self.db is None:
return
variables = {
"windpowerlib": ["P", "T", "VABS_AV", "Z0"],
"pvlib": [
"ASWDIFD_S",
"ASWDIRN_S",
"ASWDIR_S",
"P",
"T",
"VABS_AV",
],
None: variables,
}[variables if variables in ["pvlib", "windpowerlib"] else None]
self.locations = (
{(p.x, p.y): self.location(p) for p in locations}
if locations is not None
else {}
)
self.regions = (
{WKTE(r, srid=4326): self.within(r) for r in regions}
if regions is not None
else {}
)
if location_ids is None:
location_ids = []
self.location_ids = set(
[
d.id
for d in chain(self.locations.values(), *self.regions.values())
]
+ location_ids
)
self.locations = {
k: to_shape(self.locations[k].point) for k in self.locations
}
self.locations.update(
{
(p.x, p.y): p
for p in chain(
self.locations.values(),
(
to_shape(location.point)
for region in self.regions.values()
for location in region
),
)
}
)
self.regions = {
k: [to_shape(location.point) for location in self.regions[k]]
for k in self.regions
}
series = sorted(
session.query(
db["Series"], db["Variable"], db["Timespan"], db["Location"]
)
.join(db["Series"].variable)
.join(db["Series"].timespan)
.join(db["Series"].location)
.filter((db["Series"].location_id.in_(self.location_ids)))
.filter(
True
if variables is None
else db["Variable"].name.in_(variables)
)
.filter(
True
if heights is None
else (db["Series"].height.in_(chain([0], heights)))
)
.filter(
(db["Timespan"].stop >= tdt(start))
& (db["Timespan"].start <= tdt(stop))
)
.all(),
key=lambda p: (
p[3].id,
p[1].name,
p[0].height,
p[2].start,
p[2].stop,
),
)
self.series = {
k: [
(
segment_start.tz_localize("UTC")
if segment_start.tz is None
else segment_start,
segment_stop.tz_localize("UTC")
if segment_stop.tz is None
else segment_stop,
value,
)
for (series, variable, timespan, location) in g
for (segment, value) in zip(timespan.segments, series.values)
for segment_start in [tdt(segment[0])]
for segment_stop in [tdt(segment[1])]
if segment_start >= tdt(start) and segment_stop <= tdt(stop)
]
for k, g in groupby(
series,
key=lambda p: (
(to_shape(p[3].point).x, to_shape(p[3].point).y),
p[1].name,
p[0].height,
),
)
}
self.series = {k: deduplicate(self.series[k]) for k in self.series}
self.variables = {
k: sorted(set(h for _, h in g))
for k, g in groupby(
sorted((name, height) for _, name, height in self.series),
key=lambda p: p[0],
)
}
self.variables = {k: {"heights": v} for k, v in self.variables.items()}
@classmethod
def from_df(klass, df):
assert isinstance(df.columns, pd.MultiIndex), (
"DataFrame's columns aren't a `pandas.indexes.multi.MultiIndex`.\n"
"Got `{}` instead."
).format(type(df.columns))
assert len(df.columns.levels) == 2, (
"DataFrame's columns have more than two levels.\nGot: {}.\n"
"Should be exactly two, the first containing variable names and "
"the\n"
"second containing matching height levels."
)
variables = {
variable: {"heights": [vhp[1] for vhp in variable_height_pairs]}
for variable, variable_height_pairs in groupby(
df.columns.values,
key=lambda variable_height_pair: variable_height_pair[0],
)
}
locations = {xy: Point(xy[0], xy[1]) for xy in df.index.values}
series = {
(xy, *variable_height_pair): df.loc[xy, variable_height_pair]
for xy in df.index.values
for variable_height_pair in df.columns.values
}
instance = klass(start=None, stop=None, locations=None)
instance.locations = locations
instance.series = series
instance.variables = variables
return instance
def location(self, point: Point):
""" Get the measurement location closest to the given `point`.
"""
point = WKTE(point.to_wkt(), srid=4326)
return (
self.session.query(self.db["Location"])
.order_by(self.db["Location"].point.distance_centroid(point))
.first()
)
def within(self, region=None):
""" Get all measurement locations within the given `region`.
"""
region = WKTE(region.to_wkt(), srid=4326)
return (
self.session.query(self.db["Location"])
.filter(self.db["Location"].point.ST_Within(region))
.all()
)
def to_csv(self, path):
df = self.df()
df = df.applymap(
# Unzip, i.e. convert a list of tuples to a tuple of lists, the
# list of triples in each DataFrame cell and convert the result to
# a JSON string. Unzipping is necessary because the pandas'
# `to_json` wouldn't format the `Timestamps` correctly otherwise.
lambda s: pd.Series(pd.Series(xs) for xs in zip(*s)).to_json(
date_format="iso"
)
)
return df.to_csv(path, quotechar="'")
@classmethod
def from_csv(cls, path_or_buffer):
df = pd.read_csv(
path_or_buffer,
# This is necessary because the automatic conversion isn't precise
# enough.
converters={0: float, 1: float},
header=[0, 1],
index_col=[0, 1],
quotechar="'",
)
df.columns.set_levels(
[df.columns.levels[0], [float(c) for c in df.columns.levels[1]]],
inplace=True,
)
df = df.applymap(lambda s: pd.read_json(s, typ="series"))
# Reading the JSON string back in yields a weird format. Instead of a
# nested `Series` we get a `Series` containing three dictionaries. The
# `dict`s "emulate" a `Series` since their keys are string
# representations of integers and their values are the actual values
# that would be stored at the corresponding position in a `Series`. So
# we have to manually reformat the data we get back. Since there's no
# point in doing two conversions, we don't convert it back to nested
# `Series`, but immediately to `list`s of `(start, stop, value)`
# triples.
df = df.applymap(
lambda s: list(
zip(
*[
[
# The `Timestamp`s in the inner `Series`/`dict`s
# where also not converted, so we have to do this
# manually, too.
pd.to_datetime(v, utc=True) if n in [0, 1] else v
for k, v in sorted(
s[n].items(), key=lambda kv: int(kv[0])
)
]
for n in s.index
]
)
)
)
return cls.from_df(df)
def df(self, location=None, lib=None):
if lib is None and location is None:
columns = sorted(set((n, h) for (xy, n, h) in self.series))
index = sorted(xy for xy in set(xy for (xy, n, h) in self.series))
data = {
(n, h): [self.series[xy, n, h] for xy in index]
for (n, h) in columns
}
return DF(index=pd.MultiIndex.from_tuples(index), data=data)
if lib is None:
raise NotImplementedError(
"Arbitrary dataframes not supported yet.\n"
'Please use one of `lib="pvlib"` or `lib="windpowerlib"`.'
)
xy = (location.x, location.y)
location = (
self.locations[xy]
if xy in self.locations
else to_shape(self.location(location).point)
if self.session is not None
else min(
self.locations.values(),
key=lambda point: location.distance(point),
)
)
point = (location.x, location.y)
index = (
[
dhi[0] + (dhi[1] - dhi[0]) / 2
for dhi in self.series[point, "ASWDIFD_S", 0]
]
if lib == "pvlib"
else [
wind_speed[0]
for wind_speed in self.series[
point, "VABS_AV", self.variables["VABS_AV"]["heights"][0]
]
]
if lib == "windpowerlib"
else []
)
def to_series(v, h):
s = self.series[point, v, h]
return Series([p[2] for p in s], index=[p[0] for p in s])
series = {
(k[0] if lib == "pvlib" else k): sum(
to_series(*p, *k[1:]) for p in TRANSLATIONS[lib][k[0]]
)
for k in (
[
("dhi",),
("dni",),
("ghi",),
("pressure",),
("temp_air",),
("wind_speed",),
]
if lib == "pvlib"
else [
(v, h)
for v in [
"pressure",
"roughness_length",
"temperature",
"wind_speed",
]
for h in self.variables[TRANSLATIONS[lib][v][0][0]][
"heights"
]
]
if lib == "windpowerlib"
else [(v,) for v in self.variables]
)
}
if lib == "pvlib":
series["temp_air"] = (
(series["temp_air"] - 273.15)
.resample("15min")
.interpolate()[series["dhi"].index]
)
series["pressure"] = (
series["pressure"]
.resample("15min")
.interpolate()[series["dhi"].index]
)
ws = series["wind_speed"]
for k in series["wind_speed"].keys():
ws[k + TD("15min")] = ws[k]
ws.sort_index(inplace=True)
if lib == "windpowerlib":
roughness = TRANSLATIONS[lib]["roughness_length"][0][0]
series.update(
{
("roughness_length", h): series["roughness_length", h]
.resample("30min")
.interpolate()[index]
for h in self.variables[roughness]["heights"]
}
)
series.update({k: series[k][index] for k in series})
return DF(index=index, data={k: series[k].values for k in series})
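# Hedged usage sketch (the helper name is made up; it mirrors the class
# docstring, the coordinates and time window are illustrative, and a working
# connection to the openFRED database via `defaultdb()` is assumed):
def _example_weather_df():
    point = Point(9.7311, 53.3899)
    weather = Weather(
        start="2007-04-05 06:00", stop="2007-04-05 07:31",
        locations=[point], heights=[10], variables="pvlib", **defaultdb()
    )
    # DataFrame with pvlib-style columns for the location closest to `point`
    return weather.df(location=point, lib="pvlib")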
| mit |
dhennes/pykep | PyKEP/trajopt/_mga_lt_nep.py | 2 | 15675 | from PyGMO.problem import base as base_problem
from PyKEP.core import epoch, fb_con, EARTH_VELOCITY, AU, MU_SUN
from PyKEP.planet import jpl_lp
from PyKEP.sims_flanagan import leg, spacecraft, sc_state
class mga_lt_nep(base_problem):
"""
This class is a PyGMO (http://esa.github.io/pygmo/) problem representing a low-thrust
interplanetary trajectory modelled as a Multiple Gravity Assist trajectory with sims_flanagan legs
- Yam, C.H., di Lorenzo, D., and Izzo, D., Low-Thrust Trajectory Design as a Constrained Global Optimization Problem, Proceedings of the Institution of Mechanical Engineers, Part G: Journal of Aerospace Engineering, 225(11), pp.1243-1251, 2011.
The decision vector (chromosome) is::
[t0] + [T1, mf1, Vxi1, Vyi1, Vzi1, Vxf1, Vyf1, Vzf1] + [T2, mf2, Vxi2, Vyi2, Vzi2, Vxf2, Vyf2, Vzf2] + ... + [throttles1] + [throttles2] + ...
.. note::
The resulting problem is non linearly constrained. The resulting trajectory is not time-bounded.
"""
def __init__(self,
seq=[jpl_lp('earth'), jpl_lp('venus'), jpl_lp('earth')],
n_seg=[10] * 2,
t0=[epoch(0), epoch(1000)],
tof=[[200, 500], [200, 500]],
vinf_dep=2.5,
vinf_arr=2.0,
mass=4000.0,
Tmax=1.0,
Isp=2000.0,
fb_rel_vel=6,
multi_objective=False,
high_fidelity=False):
"""
prob = mga_lt_nep(seq = [jpl_lp('earth'),jpl_lp('venus'),jpl_lp('earth')], n_seg = [10]*2,
            t0 = [epoch(0),epoch(1000)], tof = [[200,500],[200,500]], vinf_dep=2.5, vinf_arr=2.0, mass=4000.0, Tmax=1.0, Isp=2000.0,
multi_objective = False, fb_rel_vel = 6, high_fidelity=False)
        - seq: list of PyKEP.planet defining the encounter sequence for the trajectory (including the initial planet)
- n_seg: list of integers containing the number of segments to be used for each leg (len(n_seg) = len(seq)-1)
- t0: list of PyKEP epochs defining the launch window
- tof: minimum and maximum time of each leg (days)
- vinf_dep: maximum launch hyperbolic velocity allowed (in km/sec)
- vinf_arr: maximum arrival hyperbolic velocity allowed (in km/sec)
- mass: spacecraft starting mass
- Tmax: maximum thrust
- Isp: engine specific impulse
- fb_rel_vel = determines the bounds on the maximum allowed relative velocity at all fly-bys (in km/sec)
- multi-objective: when True defines the problem as a multi-objective problem, returning total DV and time of flight
- high_fidelity = makes the trajectory computations slower, but actually dynamically feasible.
"""
# 1) We compute the problem dimensions .... and call the base problem constructor
self.__n_legs = len(seq) - 1
n_fb = self.__n_legs - 1
# 1a) The decision vector length
dim = 1 + self.__n_legs * 8 + sum(n_seg) * 3
# 1b) The total number of constraints (mismatch + fly-by + boundary + throttles
c_dim = self.__n_legs * 7 + n_fb * 2 + 2 + sum(n_seg)
# 1c) The number of inequality constraints (boundary + fly-by angle + throttles)
c_ineq_dim = 2 + n_fb + sum(n_seg)
# 1d) the number of objectives
f_dim = multi_objective + 1
# First we call the constructor for the base PyGMO problem
# As our problem is n dimensional, box-bounded (may be multi-objective), we write
# (dim, integer dim, number of obj, number of con, number of inequality con, tolerance on con violation)
super(mga_lt_nep, self).__init__(dim, 0, f_dim, c_dim, c_ineq_dim, 1e-4)
# 2) We then define some class data members
# public:
self.seq = seq
# private:
self.__n_seg = n_seg
self.__vinf_dep = vinf_dep * 1000
self.__vinf_arr = vinf_arr * 1000
self.__sc = spacecraft(mass, Tmax, Isp)
self.__leg = leg()
self.__leg.set_mu(MU_SUN)
self.__leg.set_spacecraft(self.__sc)
self.__leg.high_fidelity = high_fidelity
fb_rel_vel *= 1000
# 3) We compute the bounds
lb = [t0[0].mjd2000] + [0, mass / 2, -fb_rel_vel, -fb_rel_vel, -fb_rel_vel, -fb_rel_vel, -fb_rel_vel, -fb_rel_vel] * self.__n_legs + [-1, -1, -1] * sum(self.__n_seg)
ub = [t0[1].mjd2000] + [1, mass, fb_rel_vel, fb_rel_vel, fb_rel_vel, fb_rel_vel, fb_rel_vel, fb_rel_vel] * self.__n_legs + [1, 1, 1] * sum(self.__n_seg)
# 3a ... and account for the bounds on the vinfs......
lb[3:6] = [-self.__vinf_dep] * 3
ub[3:6] = [self.__vinf_dep] * 3
lb[-sum(self.__n_seg) * 3 - 3:-sum(self.__n_seg) * 3] = [-self.__vinf_arr] * 3
ub[-sum(self.__n_seg) * 3 - 3:-sum(self.__n_seg) * 3] = [self.__vinf_arr] * 3
# 3b... and for the time of flight
lb[1:1 + 8 * self.__n_legs:8] = [el[0] for el in tof]
ub[1:1 + 8 * self.__n_legs:8] = [el[1] for el in tof]
# 4) And we set the bounds
self.set_bounds(lb, ub)
# Objective function
def _objfun_impl(self, x):
if self.f_dimension == 1:
return (-x[2 + (self.__n_legs - 1) * 8],)
else:
return (-x[2 + (self.__n_legs - 1) * 8], sum(x[1:1 + 8 * self.__n_legs:8]))
# Constraints function
def _compute_constraints_impl(self, x):
# 1 - We decode the chromosome extracting the time of flights
T = list([0] * (self.__n_legs))
for i in range(self.__n_legs):
T[i] = x[1 + i * 8]
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs + 1))
r_P = list([None] * (self.__n_legs + 1))
v_P = list([None] * (self.__n_legs + 1))
for i, planet in enumerate(self.seq):
t_P[i] = epoch(x[0] + sum(T[0:i]))
r_P[i], v_P[i] = self.seq[i].eph(t_P[i])
# 3 - We iterate through legs to compute mismatches and throttles constraints
ceq = list()
cineq = list()
m0 = self.__sc.mass
for i in range(self.__n_legs):
# First Leg
v = [a + b for a, b in zip(v_P[i], x[(3 + i * 8):(6 + i * 8)])]
x0 = sc_state(r_P[i], v, m0)
v = [a + b for a, b in zip(v_P[i + 1], x[(6 + i * 8):(9 + i * 8)])]
xe = sc_state(r_P[i + 1], v, x[2 + i * 8])
throttles = x[(1 + 8 * self.__n_legs + 3 * sum(self.__n_seg[:i])):(1 + 8 * self.__n_legs + 3 * sum(self.__n_seg[:i]) + 3 * self.__n_seg[i])]
self.__leg.set(t_P[i], x0, throttles, t_P[i + 1], xe)
# update mass!
m0 = x[2 + 8 * i]
ceq.extend(self.__leg.mismatch_constraints())
cineq.extend(self.__leg.throttles_constraints())
# Adding the boundary constraints
# departure
v_dep_con = (x[3] ** 2 + x[4] ** 2 + x[5] ** 2 - self.__vinf_dep ** 2) / (EARTH_VELOCITY ** 2)
# arrival
v_arr_con = (x[6 + (self.__n_legs - 1) * 8] ** 2 + x[7 + (self.__n_legs - 1) * 8] ** 2 + x[8 + (self.__n_legs - 1) * 8] ** 2 - self.__vinf_arr ** 2) / (EARTH_VELOCITY ** 2)
cineq.append(v_dep_con * 100)
cineq.append(v_arr_con * 100)
# We add the fly-by constraints
for i in range(self.__n_legs - 1):
DV_eq, alpha_ineq = fb_con(x[6 + i * 8:9 + i * 8], x[11 + i * 8:14 + i * 8], self.seq[i + 1])
ceq.append(DV_eq / (EARTH_VELOCITY ** 2))
cineq.append(alpha_ineq)
# Making the mismatches non dimensional
for i in range(self.__n_legs):
ceq[0 + i * 7] /= AU
ceq[1 + i * 7] /= AU
ceq[2 + i * 7] /= AU
ceq[3 + i * 7] /= EARTH_VELOCITY
ceq[4 + i * 7] /= EARTH_VELOCITY
ceq[5 + i * 7] /= EARTH_VELOCITY
ceq[6 + i * 7] /= self.__sc.mass
# We assemble the constraint vector
retval = list()
retval.extend(ceq)
retval.extend(cineq)
return retval
    # And this helps visualize the trajectory
def plot(self, x, ax=None):
"""
ax = prob.plot(x, ax=None)
- x: encoded trajectory
- ax: matplotlib axis where to plot. If None figure and axis will be created
- [out] ax: matplotlib axis where to plot
Plots the trajectory represented by a decision vector x on the 3d axis ax
Example::
ax = prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP import epoch, AU
from PyKEP.sims_flanagan import sc_state
from PyKEP.orbit_plots import plot_planet, plot_sf_leg
# Creating the axis if necessary
if ax is None:
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
axis = fig.gca(projection='3d')
else:
axis = ax
# Plotting the Sun ........
axis.scatter([0], [0], [0], color='y')
# Plotting the legs .......
# 1 - We decode the chromosome extracting the time of flights
T = list([0] * (self.__n_legs))
for i in range(self.__n_legs):
T[i] = x[1 + i * 8]
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs + 1))
r_P = list([None] * (self.__n_legs + 1))
v_P = list([None] * (self.__n_legs + 1))
for i, planet in enumerate(self.seq):
t_P[i] = epoch(x[0] + sum(T[0:i]))
r_P[i], v_P[i] = self.seq[i].eph(t_P[i])
        # 3 - We iterate through legs to set them up and plot them
ceq = list()
cineq = list()
m0 = self.__sc.mass
for i in range(self.__n_legs):
# First Leg
v = [a + b for a, b in zip(v_P[i], x[(3 + i * 8):(6 + i * 8)])]
x0 = sc_state(r_P[i], v, m0)
            v = [a + b for a, b in zip(v_P[i + 1], x[(6 + i * 8):(9 + i * 8)])]
xe = sc_state(r_P[i + 1], v, x[2 + i * 8])
throttles = x[(1 + 8 * self.__n_legs + 3 * sum(self.__n_seg[:i])):(1 + 8 * self.__n_legs + 3 * sum(self.__n_seg[:i]) + 3 * self.__n_seg[i])]
self.__leg.set(t_P[i], x0, throttles, t_P[i + 1], xe)
# update mass!
m0 = x[2 + 8 * i]
plot_sf_leg(self.__leg, units=AU, N=10, ax=axis)
# Plotting planets
for i, planet in enumerate(self.seq):
plot_planet(planet, t_P[i], units=AU, legend=True, color=(0.7, 0.7, 1), ax = axis)
plt.show()
return axis
def high_fidelity(self, boolean):
"""
prob.high_fidelity(status)
- status: either True or False (True sets high fidelity on)
Sets the trajectory high fidelity mode
Example::
prob.high_fidelity(True)
"""
# We avoid here that objfun and constraint are kept that have been evaluated wrt a different fidelity
self.reset_caches()
# We set the propagation fidelity
self.__leg.high_fidelity = boolean
def ic_from_mga_1dsm(self, x):
"""
x_lt = prob.ic_from_mga_1dsm(x_mga)
- x_mga: compatible trajectory as encoded by an mga_1dsm problem
Returns an initial guess for the low-thrust trajectory, converting the mga_1dsm solution x_dsm. The user
is responsible that x_mga makes sense (i.e. it is a viable mga_1dsm representation). The conversion is done by importing in the
low-thrust encoding a) the launch date b) all the legs durations, c) the in and out relative velocities at each planet.
All throttles are put to zero.
Example::
x_lt= prob.ic_from_mga_1dsm(x_mga)
"""
        from math import pi, cos, sin, acos, log
from scipy.linalg import norm
from PyKEP import propagate_lagrangian, lambert_problem, DAY2SEC, fb_prop
retval = list([0.0] * self.dimension)
# 1 - we 'decode' the chromosome recording the various times of flight (days) in the list T
T = list([0] * (self.__n_legs))
for i in range(len(T)):
T[i] = log(x[2 + 4 * i])
total = sum(T)
T = [x[1] * time / total for time in T]
retval[0] = x[0]
for i in range(self.__n_legs):
retval[1 + 8 * i] = T[i]
retval[2 + 8 * i] = self.__sc.mass
# 2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs + 1))
r_P = list([None] * (self.__n_legs + 1))
v_P = list([None] * (self.__n_legs + 1))
DV = list([None] * (self.__n_legs + 1))
for i, planet in enumerate(self.seq):
t_P[i] = epoch(x[0] + sum(T[0:i]))
r_P[i], v_P[i] = self.seq[i].eph(t_P[i])
# 3 - We start with the first leg
theta = 2 * pi * x[1]
phi = acos(2 * x[2] - 1) - pi / 2
Vinfx = x[3] * cos(phi) * cos(theta)
Vinfy = x[3] * cos(phi) * sin(theta)
Vinfz = x[3] * sin(phi)
retval[3:6] = [Vinfx, Vinfy, Vinfz]
v0 = [a + b for a, b in zip(v_P[0], [Vinfx, Vinfy, Vinfz])]
r, v = propagate_lagrangian(r_P[0], v0, x[4] * T[0] * DAY2SEC, MU_SUN)
# Lambert arc to reach seq[1]
dt = (1 - x[4]) * T[0] * DAY2SEC
l = lambert_problem(r, r_P[1], dt, MU_SUN)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
retval[6:9] = [a - b for a, b in zip(v_end_l, v_P[1])]
# 4 - And we proceed with each successive leg
for i in range(1, self.__n_legs):
# Fly-by
v_out = fb_prop(v_end_l, v_P[i], x[7 + (i - 1) * 4] * self.seq[i].radius, x[6 + (i - 1) * 4], self.seq[i].mu_self)
retval[3 + i * 8:6 + i * 8] = [a - b for a, b in zip(v_out, v_P[i])]
# s/c propagation before the DSM
r, v = propagate_lagrangian(r_P[i], v_out, x[8 + (i - 1) * 4] * T[i] * DAY2SEC, MU_SUN)
# Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1 - x[8 + (i - 1) * 4]) * T[i] * DAY2SEC
l = lambert_problem(r, r_P[i + 1], dt, MU_SUN)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
            # DSM occurring at time nu2*T2
DV[i] = norm([a - b for a, b in zip(v_beg_l, v)])
retval[6 + i * 8:9 + i * 8] = [a - b for a, b in zip(v_end_l, v_P[i + 1])]
return retval
def double_segments(self, x):
"""
x_doubled = prob.double_segments(x)
- x: compatible trajectory as encoded by an mga_1dsm mga_lt_nep
        Returns the decision vector encoding a low-thrust trajectory having double the number of segments with respect to x
and a 'similar' throttle history. In case high fidelity is True, and x is a feasible trajectory, the returned decision vector
also encodes a feasible trajectory that can be further optimized
Example::
prob = traj.mga_lt_nep(nseg=[[10],[20]])
pop = population(prob,1)
.......OPTIMIZE.......
x = prob.double_segments(pop.champion.x)
prob = traj.mga_lt_nep(nseg=[[20],[40]])
pop = population(prob)
pop.push_back(x)
.......OPTIMIZE AGAIN......
"""
y = list()
y.extend(x[:-sum(self.__n_seg) * 3])
for i in range(sum(self.__n_seg)):
y.extend(x[-(sum(self.__n_seg) - i) * 3:-(sum(self.__n_seg) - 1 - i) * 3] * 2)
y.extend(x[-3:] * 2)
return y
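# Hedged usage sketch (the helper name is made up; it mirrors the constructor
# defaults and assumes PyGMO plus the JPL low-precision ephemerides shipped
# with PyKEP are available):
def _example_problem():
    seq = [jpl_lp('earth'), jpl_lp('venus'), jpl_lp('earth')]
    return mga_lt_nep(seq=seq, n_seg=[10, 10],
                      t0=[epoch(0), epoch(1000)],
                      tof=[[200, 500], [200, 500]],
                      vinf_dep=2.5, vinf_arr=2.0,
                      mass=4000.0, Tmax=1.0, Isp=2000.0)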
| gpl-3.0 |
astocko/statsmodels | statsmodels/stats/anova.py | 25 | 13433 | from statsmodels.compat.python import lrange, lmap
import numpy as np
from scipy import stats
from pandas import DataFrame, Index
from statsmodels.formula.formulatools import (_remove_intercept_patsy,
_has_intercept, _intercept_idx)
def _get_covariance(model, robust):
if robust is None:
return model.cov_params()
elif robust == "hc0":
se = model.HC0_se
return model.cov_HC0
elif robust == "hc1":
se = model.HC1_se
return model.cov_HC1
elif robust == "hc2":
se = model.HC2_se
return model.cov_HC2
elif robust == "hc3":
se = model.HC3_se
return model.cov_HC3
else: # pragma: no cover
raise ValueError("robust options %s not understood" % robust)
#NOTE: these need to take into account weights !
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = (len(design_info.terms) - _has_intercept(design_info) + 1)
pr_test = "PR(>%s)" % test
names = ['df', 'sum_sq', 'mean_sq', test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns = names)
if typ in [1,"I"]:
return anova1_lm_single(model, endog, exog, nobs, design_info, table,
n_rows, test, pr_test, robust)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [4, "IV"]:
        raise NotImplementedError("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, endog)
arr = np.zeros((len(design_info.terms), len(design_info.column_names)))
slices = [design_info.slice(name) for name in design_info.term_names]
for i,slice_ in enumerate(slices):
arr[i, slice_] = 1
sum_sq = np.dot(arr, effects**2)
#NOTE: assumes intercept is first column
idx = _intercept_idx(design_info)
sum_sq = sum_sq[~idx]
term_names = np.array(design_info.term_names) # want boolean indexing
term_names = term_names[~idx]
index = term_names.tolist()
table.index = Index(index + ['Residual'])
table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]
if test == 'F':
table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/
(model.ssr/model.df_resid))
table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"],
model.df_resid)
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
table['mean_sq'] = table['sum_sq'] / table['df']
return table
#NOTE: the below is not agnostic about formula...
def anova2_lm_single(model, design_info, n_rows, test, pr_test, robust):
"""
ANOVA type II table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
Type II
Sum of Squares compares marginal contribution of terms. Thus, it is
not particularly useful for models with significant interaction terms.
"""
terms_info = design_info.terms[:] # copy
terms_info = _remove_intercept_patsy(terms_info)
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, None)
robust_cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
        # grab all variables except interaction effects that contain term
# need two hypotheses matrices L1 is most restrictive, ie., term==0
# L2 is everything except term==0
cols = design_info.slice(term)
L1 = lrange(cols.start, cols.stop)
L2 = []
term_set = set(term.factors)
for t in terms_info: # for the term you have
other_set = set(t.factors)
if term_set.issubset(other_set) and not term_set == other_set:
col = design_info.slice(t)
# on a higher order term containing current `term`
L1.extend(lrange(col.start, col.stop))
L2.extend(lrange(col.start, col.stop))
L1 = np.eye(model.model.exog.shape[1])[L1]
L2 = np.eye(model.model.exog.shape[1])[L2]
if L2.size:
LVL = np.dot(np.dot(L1,robust_cov),L2.T)
from scipy import linalg
orth_compl,_ = linalg.qr(LVL)
r = L1.shape[0] - L2.shape[0]
# L1|2
# use the non-unique orthogonal completion since L12 is rank r
L12 = np.dot(orth_compl[:,-r:].T, L1)
else:
L12 = L1
r = L1.shape[0]
#from IPython.core.debugger import Pdb; Pdb().set_trace()
if test == 'F':
f = model.f_test(L12, cov_p=robust_cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova3_lm_single(model, design_info, n_rows, test, pr_test, robust):
n_rows += _has_intercept(design_info)
terms_info = design_info.terms
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab term, hypothesis is that term == 0
cols = design_info.slice(term)
L1 = np.eye(model.model.exog.shape[1])[cols]
L12 = L1
r = L1.shape[0]
if test == 'F':
f = model.f_test(L12, cov_p=cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
#col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
#NOTE: Don't need to sort because terms are an ordered dict now
#table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova_lm(*args, **kwargs):
"""
ANOVA table for one or more fitted linear models.
Parameters
----------
args : fitted linear model results instance
One or more fitted linear models
scale : float
Estimate of variance, If None, will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
typ : str or int {"I","II","III"} or {1,2,3}
The type of ANOVA test to perform. See notes.
robust : {None, "hc0", "hc1", "hc2", "hc3"}
Use heteroscedasticity-corrected coefficient covariance matrix.
If robust covariance is desired, it is recommended to use `hc3`.
Returns
-------
anova : DataFrame
A DataFrame containing.
Notes
-----
Model statistics are given in the order of args. Models must have
been fit using the formula api.
See Also
--------
model_results.compare_f_test, model_results.compare_lm_test
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.formula.api import ols
>>> moore = sm.datasets.get_rdataset("Moore", "car",
... cache=True) # load data
>>> data = moore.data
>>> data = data.rename(columns={"partner.status" :
... "partner_status"}) # make name pythonic
>>> moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
... data=data).fit()
>>> table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame
>>> print table
"""
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only supported for type I. "
"Got type %s" % str(typ))
### COMPUTE ANOVA TYPE I ###
# if given a single model
if len(args) == 1:
return anova_single(*args, **kwargs)
# received multiple fitted models
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
n_models = len(args)
model_formula = []
pr_test = "Pr(>%s)" % test
names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]
table = DataFrame(np.zeros((n_models, 6)), columns = names)
if not scale: # assume biggest model is last
scale = args[-1].scale
table["ssr"] = lmap(getattr, args, ["ssr"]*n_models)
table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models)
table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values)
table["ss_diff"] = -table["ssr"].diff()
if test == "F":
table["F"] = table["ss_diff"] / table["df_diff"] / scale
table[pr_test] = stats.f.sf(table["F"], table["df_diff"],
table["df_resid"])
# for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan
table[pr_test][table['F'].isnull()] = np.nan
return table
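# Hedged usage sketch (the helper name is made up; adapted from the docstring
# above and assuming the `car::Moore` dataset can be fetched via rdatasets):
def _example_anova_lm():
    import statsmodels.api as sm
    from statsmodels.formula.api import ols
    data = sm.datasets.get_rdataset("Moore", "car", cache=True).data
    data = data.rename(columns={"partner.status": "partner_status"})
    lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
             data=data).fit()
    return anova_lm(lm, typ=2)   # Type II ANOVA table as a DataFrame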
if __name__ == "__main__":
import pandas
from statsmodels.formula.api import ols
# in R
#library(car)
#write.csv(Moore, "moore.csv", row.names=FALSE)
moore = pandas.read_table('moore.csv', delimiter=",", skiprows=1,
names=['partner_status','conformity',
'fcategory','fscore'])
moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
data=moore).fit()
mooreB = ols('conformity ~ C(partner_status, Sum)', data=moore).fit()
# for each term you just want to test vs the model without its
# higher-order terms
# using Monette-Fox slides and Marden class notes for linear algebra /
# orthogonal complement
# https://netfiles.uiuc.edu/jimarden/www/Classes/STAT324/
table = anova_lm(moore_lm, typ=2)
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/ensemble/partial_dependence.py | 5 | 15362 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..utils import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution: use unique vals
axis = uniques
else:
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
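# Hypothetical usage sketch (added for illustration, not part of the original
# module): it shows how ``_grid_from_X`` builds one axis per column and returns
# their cartesian product; ``X_demo`` is an assumed toy input.
#
#   >>> X_demo = np.array([[0., 10.], [1., 20.], [2., 30.]])
#   >>> grid, axes = _grid_from_X(X_demo, percentiles=(0.05, 0.95),
#   ...                           grid_resolution=2)
#   >>> grid.shape    # 2 points per axis -> 2 * 2 grid rows, one column per feature
#   (4, 2)
#   >>> len(axes)
#   2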
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
check_is_fitted(gbrt, 'estimators_')
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features_) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features_ - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=None,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of ints, strings, or tuples of ints or strings
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
If feature_names is specified and seq[i] is an int, seq[i]
must be < len(feature_names).
If seq[i] is a string, feature_names must be specified, and
seq[i] must be in feature_names.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
grid_resolution : int, default=100
The number of equally spaced points on the axes.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
n_jobs : int or None, optional (default=None)
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
**fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
check_is_fitted(gbrt, 'estimators_')
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features_ != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features_')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features_)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
simiden/BangsimonStocks | stockGUI.py | 2 | 14430 | import wx
import warnings
import os
import matplotlib
import wx.lib.hyperlink as hl
import wx.lib.scrolledpanel
import wx.lib.calendar as cal
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar
from stockInfo import stockInfo
from stockPlot import stockPlot
from stockUtil import *
from stockAnalysis import Beta, BuyOrSell
from datetime import timedelta,date,datetime
import webbrowser
#: Default values used on start and when fetching new data
DEFAULT_TICKER = "GOOG"
DEFAULT_PERIOD = 26
class initialFrame(wx.Frame):
""" The main frame of the application
"""
title = "Bangsimon Stocks"
def __init__(self):
#Create everything
super(initialFrame,self).__init__( None, -1, self.title,)
self.Bind(wx.EVT_CLOSE, self.on_exit)
self.create_menu()
self.create_status_bar()
print "Fetching initial data"
self.create_main_panel()
print "Plotting initial data"
self.draw_figure()
def create_menu(self):
"""Creates the menubar"""
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_new = menu_file.Append(-1, "&New ticker\tCtrl-N", "Choose new ticker to track")
self.Bind(wx.EVT_MENU, self.on_new, m_new)
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
plot = wx.Menu()
options = ['Adj Close','Open' ,'High', 'Low', 'Close', 'Avg. Price']
#Use map instead of multiple copy-paste
M = map( (lambda x: self.Bind(wx.EVT_MENU,self.plot_handler,
plot.AppendRadioItem(-1,x))),options)
plot.AppendSeparator()
#We want these to be on the same graph, option to turn on or off
self.Bind(wx.EVT_MENU,self.plot_handler,plot.AppendCheckItem(-1, "Simple Moving Average"))
self.Bind(wx.EVT_MENU,self.plot_handler,plot.AppendCheckItem(-1, "Volume"))
dates = wx.Menu()
self.Bind(wx.EVT_MENU,self.changeFromDate,dates.Append(-1, "Change From Date"))
self.Bind(wx.EVT_MENU,self.changeToDate,dates.Append(-1, "Change To Date"))
comp = wx.Menu()
self.Bind(wx.EVT_MENU,self.tiProfile,comp.Append(-1, "Profile"))
self.Bind(wx.EVT_MENU,self.tiKeys,comp.Append(-1, "Key Statistics"))
self.menubar.SetMenus([(menu_file,"&File"),(plot, "&Plot"),(dates,"&Dates"),(comp, "&Company")])
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Creates the main panel and everything"""
self.panel = wx.Panel(self)
self.panel.stockObj = stockInfo(DEFAULT_TICKER)
#Put up some default values that are used in plot
self.panel.currentAttr = "Adj Close"
td = self.panel.stockObj.toDate - timedelta(weeks=DEFAULT_PERIOD)
if self.panel.stockObj.validDate(td):
self.panel.fromDate = td
else:
self.panel.fromDate = self.panel.stockObj.fromDate
self.panel.toDate = self.panel.stockObj.toDate
self.panel.MovingAvg = False
self.panel.Volume = False
self.panel.MovingAvgN = 20
self.panel.Beta = Beta(self.panel.stockObj,self.panel.fromDate, self.panel.toDate)
#adding the plot
self.fig = matplotlib.pyplot.gcf()
self.canvas = FigCanvas(self.panel, -1, self.fig)
#Add slider to change n of moving average
self.slider_label = wx.StaticText(self.panel, -1,
"Moving Average N: ")
self.slider_width = wx.Slider(self.panel, -1,
value=20,
minValue=20,
maxValue=200,
style=wx.SL_AUTOTICKS | wx.SL_LABELS)
self.slider_width.SetTickFreq(10, 1)
self.Bind(wx.EVT_COMMAND_SCROLL_THUMBTRACK, self.on_slider_width, self.slider_width)
self.Bind(wx.EVT_COMMAND_SCROLL_CHANGED, self.on_slider_width, self.slider_width)
#Toolbar of chart
self.toolbar = NavigationToolbar(self.canvas)
self.SetToolBar(self.toolbar)
self.toolbar.Realize()#: Windows fix, does not work
#We use boxes to get a dynamic layout
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 0, wx.LEFT | wx.TOP | wx.EXPAND)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL
self.hbox.AddSpacer(30)
self.hbox.Add(self.slider_label, 0, flag=flags)
self.hbox.Add(self.slider_width, 1, border=3, flag=flags |wx.EXPAND)
self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_LEFT | wx.TOP| wx.EXPAND)
self.vbox.AddSpacer(10)
#Add NewsFeed
self.RssBox = wx.BoxSizer(wx.VERTICAL)
self.RssPanel = wx.lib.scrolledpanel.ScrolledPanel(self.panel)
self.RssPanel.SetMinSize((-1,100))
self.RssPanel.SetSizer(self.RssBox)
self.updateRSS()
self.RssBox.Fit(self.RssPanel)
self.RssPanel.SetupScrolling()
self.vbox.Add(self.RssPanel,1, flag= wx.EXPAND | wx.ALIGN_CENTER)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.updateCurrentData()
def updateRSS(self):
"""Fetches news for the graph"""
self.RssBox.Clear()
rss = self.panel.stockObj.getRSS()
self.hyperlinks = map( (lambda p:hl.HyperLinkCtrl(self.RssPanel,wx.ID_ANY,label=str(p[0]),URL = str(p[1]))),rss)
for i in self.hyperlinks:
self.RssBox.Add(i,0,flag= wx.EXPAND |wx.ALIGN_CENTER)
self.RssBox.Layout()
def create_status_bar(self):
"""Creates the statusbar"""
self.statusbar = self.CreateStatusBar()
def draw_figure(self):
"""Plots the graph"""
self.flash_status_message("Plotting...")
self.fig.clear()
self.fig.subplots_adjust(top=0.82)
self.fig = stockPlot(self.panel.stockObj,self.panel.currentAttr,
self.panel.fromDate, self.panel.toDate,
self.panel.MovingAvg, self.panel.MovingAvgN,
self.panel.Volume)
self.plotInformation()
self.canvas.draw()
self.flash_status_message("Plotting...Done")
def plotInformation(self):
"""Prints additional data on graph"""
self.updateCurrentData()
#Put title on graph
self.fig.suptitle(self.panel.currentData['name'],fontsize="16",fontweight="bold",url='http://finance.yahoo.com/q/pr?s='+self.panel.stockObj.ticker+'+Key+Statistics',picker=True,color="b")
Cd = lambda c : str(self.panel.currentData[c])
#Current information on graph
currentDataString1 = "Price: %s, Market Cap: %s\n\
52 week high/low: %s/%s,\n\
Beta of period: %s" % (Cd('price'), Cd('market_cap'), Cd('52_week_high'), Cd('52_week_low'), str(self.panel.Beta))
currentDataString2 = "Short ratio: %s, 50 MAvg: %s\n\
Earnings per share: %s\n\
Data indicates you should %s" % (Cd('short_ratio'), Cd('50day_moving_avg'), Cd('earnings_per_share'), self.panel.BuyOrSell)
self.fig.text(0.13, 0.965,"Current:\n" + currentDataString1,
horizontalalignment='left',
verticalalignment='top',
multialignment='center',
transform = self.fig.axes[0].transAxes)
self.fig.text(0.87, 0.965,"Current:\n" + currentDataString2,
horizontalalignment='right',
verticalalignment='top',
multialignment='center',
transform = self.fig.axes[0].transAxes)
#These two functions are used to open the information pages
def tiKeys(self,event):
webbrowser.open('http://finance.yahoo.com/q/ks?s='+self.panel.stockObj.ticker+'+Key+Statistics')
def tiProfile(self,event):
webbrowser.open('http://finance.yahoo.com/q/pr?s='+self.panel.stockObj.ticker+'+Profile')
def updateCurrentData(self):
"""Fetches current data from yahoo"""
self.panel.currentData = self.panel.stockObj.getCurrent()
if BuyOrSell(self.panel.stockObj) == 1:
self.panel.BuyOrSell = "buy"
else:
self.panel.BuyOrSell = "sell"
def on_slider_width(self, event):
"""Handles the changing of the slider, updates the value used"""
self.panel.MovingAvgN = self.slider_width.GetValue()
self.draw_figure()
def on_new(self, event):
"""Handles creating new tickers"""
prompt = wx.TextEntryDialog(self, "Enter new ticker", "New ticker",self.panel.stockObj.ticker)
prompt.ShowModal()
#Use recursion to ensure we get a legal value
try:
self.flash_status_message("Fetching data...")
self.panel.stockObj = stockInfo(prompt.GetValue())
self.flash_status_message("Fetching data...Done")
if not (self.panel.stockObj.validDate(self.panel.fromDate) and self.panel.stockObj.validDate(self.panel.toDate)):
td = self.panel.stockObj.toDate - timedelta(weeks=DEFAULT_PERIOD)
if self.panel.stockObj.validDate(td):
self.panel.fromDate = td
else:
self.panel.fromDate = self.panel.stockObj.fromDate
self.panel.toDate = self.panel.stockObj.toDate
self.panel.Beta = Beta(self.panel.stockObj,self.panel.fromDate, self.panel.toDate)
self.updateCurrentData()
self.updateRSS()
self.draw_figure()
except ValueError:
msg = wx.MessageDialog(self, "Invalid Ticker", "Error", wx.OK|wx.ICON_ERROR)
msg.ShowModal()
self.on_new(None)
def plot_handler(self,event):
"""Handles the changing of the plot."""
menus = self.GetMenuBar().GetMenus()
#Grab plot menu from the menubar
plotmenu = filter( (lambda f: True if "Plot" in f[1] else False ), menus)
mIs = plotmenu[0][0].GetMenuItems()
#Check what happened
for mI in mIs:
label = mI.GetItemLabelText()
if mI.GetKind() == 2:
if mI.IsChecked():
self.panel.currentAttr = label
# find whether sMA is on.
if mI.GetKind() == 1:
if label == "Simple Moving Average":
self.panel.MovingAvg = mI.IsChecked()
if label == "Volume":
self.panel.Volume = mI.IsChecked()
self.updateCurrentData()
self.draw_figure()
def on_save_plot(self, event):
"""Handles the saving of the plot"""
#We use the saving function in the toolbar
self.toolbar.save(None)
def on_exit(self, event):
"""Exits program"""
self.timeroff.Stop()
self.Destroy()
quit()
def changeFromDate(self,event):
"""Changes the from date of the graph"""
day,month,year = self.panel.fromDate.day,self.panel.fromDate.month,self.panel.fromDate.year
dlg = cal.CalenDlg(self.panel,month,day,year)
dlg.y_spin.SetRange(self.panel.stockObj.fromDate.year,self.panel.stockObj.toDate.year)
dlg.Centre()
if dlg.ShowModal() == wx.ID_OK:
r = dlg.result
r = r[3] + " " + r[2] + " " + r[1]
resDate = datetime.strptime(r,"%Y %B %d").date()
else:
return
#Ensure we get a valid date
if self.panel.stockObj.validDate(resDate) and (compareDates(resDate,self.panel.toDate) < 0 ):
self.panel.fromDate = resDate
self.panel.Beta = Beta(self.panel.stockObj,self.panel.fromDate, self.panel.toDate)
self.draw_figure()
else:
msg = wx.MessageDialog(self, "Invalid Date, date must be within range %s to %s, and fromDate < toDate" % (self.panel.stockObj.fromDate,self.panel.stockObj.toDate), "Error", wx.OK|wx.ICON_ERROR)
msg.ShowModal()
self.changeFromDate(None)
def changeToDate(self,event):
"""Changes the to date of the graph"""
day,month,year = self.panel.toDate.day,self.panel.toDate.month,self.panel.toDate.year
dlg = cal.CalenDlg(self.panel,month,day,year)
dlg.y_spin.SetRange(self.panel.stockObj.fromDate.year,self.panel.stockObj.toDate.year)
dlg.Centre()
if dlg.ShowModal() == wx.ID_OK:
r = dlg.result
r = r[3] + " " + r[2] + " " + r[1]
resDate = datetime.strptime(r,"%Y %B %d").date()
else:
return
#Ensure we get a valid date
if self.panel.stockObj.validDate(resDate) and (compareDates(self.panel.fromDate,resDate) < 0 ):
self.panel.toDate = resDate
self.panel.Beta = Beta(self.panel.stockObj,self.panel.fromDate, self.panel.toDate)
self.draw_figure()
else:
msg = wx.MessageDialog(self, "Invalid Date, date must be within range %s to %s and fromDate < toDate" % (self.panel.stockObj.fromDate,self.panel.stockObj.toDate), "Error", wx.OK|wx.ICON_ERROR)
msg.ShowModal()
self.changeToDate(None)
def flash_status_message(self, msg, flash_len_ms=1500):
"""Flashes status message on status bar, so that it wont appear to have frozen"""
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == "__main__":
app = wx.App(False)
app.frame = initialFrame()
app.frame.Show()
app.MainLoop()
| gpl-3.0 |
degoldschmidt/ribeirolab-codeconversion | python/flyPAD/fp_swarmbox.py | 1 | 6802 | import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("ticks")
sns.despine(left=True)
def conj(conditions, printit=False):
outstr = ""
for ind, cond in enumerate(conditions):
outstr += "Label == "
outstr += "'"
outstr += cond
outstr += "'"
if ind < len(conditions)-1:
outstr += " | "
if printit:
print(outstr)
return outstr
def filtered(_data, _ID, _date, _temp, _labels=[]):
_data = _data.query("Id == '" + _ID + "'")
_data = _data.query("Date == " +_date)
if len(_temp) > 0 :
_data = _data.query("Temp == '" +_temp+"'")
if len(_labels) > 0:
_data = _data.query(conj(_labels))
return _data
def scatter(_ax, _data, _ID, _date, _temp, _labels, _lim=8., _showdata=False):
#_data = filtered(_data, _ID, _date, _temp, _labels=_labels)
if len(_labels) == 0:
_labels = _data["Label"].unique()
cY = _data[_data["Label"] == "emptySplitGal4"]["MedianY"].unique()[0]
cS = _data[_data["Label"] == "emptySplitGal4"]["MedianS"].unique()[0]
_data["MedianY"] /= cY
_data["MedianS"] /= cS
ax = sns.FacetGrid(_data, hue="Label", palette=sns.color_palette("colorblind", n_colors=14), size=5)
ax.map(plt.scatter, "MedianY", "MedianS", s=10, linewidth=.5, edgecolor="white")
datax = np.array(_data["DataY"])/cY
datay = np.array(_data["DataS"])/cS
if _showdata:
plt.plot(datax, datay, c="k", alpha=0.5, marker='.', ls = "None", markersize=2.5)
x = np.arange(0.0, 20.0, 5.0)
a = 25
Sy1 = np.tan(np.pi*a/180)*(x-1) + 1 ##
Sy2 = np.tan(-np.pi*a/180)*(x-1) + 1
Yy1 = np.tan(np.pi*(90-a)/180)*(x-1) + 1
Yy2 = np.tan(np.pi*(90+a)/180)*(x-1) + 1
plt.plot(x, Sy1, c='#f30000', ls="dotted", lw=0.5, alpha=0.5)
plt.plot(x, Sy2, c='#f30000', ls="dotted", lw=0.5, alpha=0.5)
plt.plot(x, Yy1, c='#0000f3', ls="dotted", lw=0.5, alpha=0.5)
plt.plot(x, Yy2, c='#0000f3', ls="dotted", lw=0.5, alpha=0.5)
plt.axhline(y=1.0, xmin=0.0, xmax=1., linewidth=0.5, color = 'r', ls = "-", alpha=0.5)
plt.axvline(x=1.0, ymin=0.0, ymax=1., linewidth=0.5, color = 'b', ls = "-", alpha=0.5)
for label in _labels:
_x = _data[_data["Label"] == label]["MedianY"].unique()[0]
_y = _data[_data["Label"] == label]["MedianS"].unique()[0]
plt.text(_x+0.01, _y+0.01, label, fontsize=8)
plt.xlim([0.,_lim])
plt.ylim([0.,_lim])
plt.xlabel("Norm. median #sips yeast")
plt.ylabel("Norm. median #sips sucrose")
return ax
def swarmbox( _ax, _data, _x, _y, _pval, _s, ps=2, _onlybox=True, _grouped=""):
if not _onlybox:
if len(_grouped)==0:
_ax = sns.swarmplot(x=_x, y=_y, hue="Signif"+_s, data=_data, size=ps, ax=_ax, palette=dict(yes = 'r', no = 'k', control = 'b'))
else:
_ax = sns.swarmplot(x=_x, y=_y, hue=_grouped, data=_data, size=ps, ax=_ax, palette="RdBu_r", split=True, edgecolor="gray")
if len(_grouped)==0:
_ax = sns.boxplot(x=_x, y=_y, data=_data, width=0.4, linewidth=0.5, showcaps=False,boxprops={'facecolor':'.85'}, showfliers=False,whiskerprops={'linewidth':0}, ax=_ax)
_ax = sns.pointplot(x=_x, y=_y, hue="Signif"+_s, data=_data, estimator=np.median, ci=None, join=False, color="0.5", markers="_", scale=0.75, ax=_ax, palette=dict(yes = 'r', no = 'k', control = 'b' ))
else:
_ax = sns.boxplot(x=_x, y=_y, data=_data, hue=_grouped, width=0.4, linewidth=0.5, showcaps=False, showfliers=False,whiskerprops={'linewidth':0}, palette="RdBu_r", ax=_ax)
#_ax = sns.pointplot(x=_x, y=_y, hue="Signif"+_s, data=_data, estimator=np.median, ci=None, join=False, color="0.5", markers="_", scale=0.75, ax=_ax, palette=dict(yes = 'r', no = 'k', control = 'b' ))
#_ax = sns.violinplot(x=_x, y=_y, hue=_grouped, data=_data, split=True, inner="stick", palette="RdBu_r");
return _ax
def screenplot(_axes, _dataframe, _ID, _date, _temp, _sort="Y", _title="", _labels=[], _fsuff="", _onlybox=True, _grouped=""):
Substr = ["10% Yeast", "20 mM Sucrose"]
sr = ["Y", "S"]
if _title == "":
title = _ID.replace("_", " ")
else:
title = _title
Df = _dataframe #filtered(_dataframe, _ID, _date, _temp, _labels=_labels)
#print(Df["Temp"].unique())
if _sort == "Y":
labelorder = (Df[Df.Temp == '30ºC'].sort_values("MedianY"))["Label"].unique()
else:
labelorder = (Df[Df.Temp == '30ºC'].sort_values("MedianS"))["Label"].unique()
Df = Df.set_index('Label')
Df = Df.loc[labelorder]
Df.reset_index(level=['Label'], inplace=True)
for i, ax in enumerate(_axes):
s = sr[i]
Labels = Df["Label"].unique()
if len(_grouped) > 0:
Df30 = Df[Df.Temp == '30ºC']
pVals = np.array( [ Df30[Df30.Label == label]["pVal"+s].unique()[0] for label in Labels ] )
print(sr[i])
for jj, label in enumerate(Labels):
print(label, np.log10(1./pVals[jj]))
else:
pVals = np.array( [ Df[Df.Label == label]["pVal"+s].unique()[0] for label in Labels ] )
cmedian = Df[Df.Label == "emptySplitGal4"]["Median"+s].unique()[0]
dmedian = np.nanmedian(Df[Df.Label == "emptySplitGal4"]["Data"+s])
print(cmedian, dmedian)
ax2 = ax.twinx()
ax2.plot(np.log10(1./pVals), 'k-', linewidth=0.5)
ax = swarmbox(ax, Df, "Label", "Data"+s, np.log10(1./np.array(pVals[i])), s, _onlybox = _onlybox, _grouped=_grouped)
#ax.set_ylim(1.4, 1.8)
if _grouped == "":
ax.axhline(y=dmedian, xmin=0.0, xmax=1., linewidth=1, color = 'b', ls = "dotted", alpha=0.5)
ax.set_xticklabels(Labels, rotation=60, ha='right')
ax.grid(which='major', axis='y', linestyle='--')
ax.tick_params(axis='both', direction='out', labelsize=9, pad=1)
ax.tick_params(axis='y', labelsize=10)
[lab.set_color("red") for j, lab in enumerate(ax.get_xticklabels()) if np.log10(1./np.array(pVals[j])) > 2.]
xticks = [item.get_text() for item in ax.get_xticklabels()]
for ix, item in enumerate(xticks):
if xticks[ix] == 'emptySplitGal4':
xticks[ix] = 'control'
ax.set_xticklabels(xticks)
[lab.set_color("blue") for j, lab in enumerate(ax.get_xticklabels()) if lab.get_text() == "control"]
ax.set_xlabel("Split-Gal4 Label", fontsize=8, fontweight='bold')
ax.set_ylabel(title)
ax2.set_ylabel("log10(1/p)")
ax.set_title(Substr[i], fontsize=12, loc='left', fontweight='bold')
ax.legend(loc='upper left', title=" p < 0.01", labelspacing=0.25, handletextpad=-0.2, borderpad=0.,fontsize=8)
return _axes
| gpl-3.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/io/sas/sas7bdat.py | 3 | 27470 | """
Read SAS7BDAT files
Based on code written by Jared Hobbs:
https://bitbucket.org/jaredhobbs/sas7bdat
See also:
https://github.com/BioStatMatt/sas7bdat
Partial documentation of the file format:
https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf
Reference for binary data compression:
http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
"""
import pandas as pd
from pandas import compat
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.errors import EmptyDataError
import numpy as np
import struct
import pandas.io.sas.sas_constants as const
from pandas.io.sas._sas import Parser
class _subheader_pointer(object):
pass
class _column(object):
pass
# SAS7BDAT represents a SAS data file in SAS7BDAT format.
class SAS7BDATReader(BaseIterator):
"""
Read SAS files in SAS7BDAT format.
Parameters
----------
path_or_buf : path name or buffer
Name of SAS file or file-like object pointing to SAS file
contents.
index : column identifier, defaults to None
Column to use as index.
convert_dates : boolean, defaults to True
Attempt to convert dates to Pandas datetime values. Note that
some rarely used SAS date formats may be unsupported.
blank_missing : boolean, defaults to True
Convert empty strings to missing values (SAS uses blanks to
indicate missing character variables).
chunksize : int, defaults to None
Return SAS7BDATReader object for iterations, returns chunks
with given number of lines.
encoding : string, defaults to None
String encoding.
convert_text : bool, defaults to True
If False, text variables are left as raw bytes.
convert_header_text : bool, defaults to True
If False, header text, including column names, are left as raw
bytes.
"""
def __init__(self, path_or_buf, index=None, convert_dates=True,
blank_missing=True, chunksize=None, encoding=None,
convert_text=True, convert_header_text=True):
self.index = index
self.convert_dates = convert_dates
self.blank_missing = blank_missing
self.chunksize = chunksize
self.encoding = encoding
self.convert_text = convert_text
self.convert_header_text = convert_header_text
self.default_encoding = "latin-1"
self.compression = ""
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_formats = []
self.columns = []
self._current_page_data_subheader_pointers = []
self._cached_page = None
self._column_data_lengths = []
self._column_data_offsets = []
self._current_row_in_file_index = 0
self._current_row_on_page_index = 0
self._current_row_in_file_index = 0
self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(self._path_or_buf, compat.string_types):
self._path_or_buf = open(self._path_or_buf, 'rb')
self.handle = self._path_or_buf
self._get_properties()
self._parse_metadata()
def close(self):
try:
self.handle.close()
except AttributeError:
pass
def _get_properties(self):
# Check magic number
self._path_or_buf.seek(0)
self._cached_page = self._path_or_buf.read(288)
if self._cached_page[0:len(const.magic)] != const.magic:
self.close()
raise ValueError("magic number mismatch (not a SAS file?)")
# Get alignment information
align1, align2 = 0, 0
buf = self._read_bytes(const.align_1_offset, const.align_1_length)
if buf == const.u64_byte_checker_value:
align2 = const.align_2_value
self.U64 = True
self._int_length = 8
self._page_bit_offset = const.page_bit_offset_x64
self._subheader_pointer_length = const.subheader_pointer_length_x64
else:
self.U64 = False
self._page_bit_offset = const.page_bit_offset_x86
self._subheader_pointer_length = const.subheader_pointer_length_x86
self._int_length = 4
buf = self._read_bytes(const.align_2_offset, const.align_2_length)
if buf == const.align_1_checker_value:
align1 = const.align_2_value
total_align = align1 + align2
# Get endianness information
buf = self._read_bytes(const.endianness_offset,
const.endianness_length)
if buf == b'\x01':
self.byte_order = "<"
else:
self.byte_order = ">"
# Get encoding information
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
if buf in const.encoding_names:
self.file_encoding = const.encoding_names[buf]
else:
self.file_encoding = "unknown (code=%s)" % str(buf)
# Get platform information
buf = self._read_bytes(const.platform_offset, const.platform_length)
if buf == b'1':
self.platform = "unix"
elif buf == b'2':
self.platform = "windows"
else:
self.platform = "unknown"
buf = self._read_bytes(const.dataset_offset, const.dataset_length)
self.name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.name = self.name.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.file_type_offset, const.file_type_length)
self.file_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.file_type = self.file_type.decode(
self.encoding or self.default_encoding)
# Timestamp is epoch 01/01/1960
epoch = pd.datetime(1960, 1, 1)
x = self._read_float(const.date_created_offset + align1,
const.date_created_length)
self.date_created = epoch + pd.to_timedelta(x, unit='s')
x = self._read_float(const.date_modified_offset + align1,
const.date_modified_length)
self.date_modified = epoch + pd.to_timedelta(x, unit='s')
self.header_length = self._read_int(const.header_size_offset + align1,
const.header_size_length)
# Read the rest of the header into cached_page.
buf = self._path_or_buf.read(self.header_length - 288)
self._cached_page += buf
if len(self._cached_page) != self.header_length:
self.close()
raise ValueError("The SAS7BDAT file appears to be truncated.")
self._page_length = self._read_int(const.page_size_offset + align1,
const.page_size_length)
self._page_count = self._read_int(const.page_count_offset + align1,
const.page_count_length)
buf = self._read_bytes(const.sas_release_offset + total_align,
const.sas_release_length)
self.sas_release = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.sas_release = self.sas_release.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.sas_server_type_offset + total_align,
const.sas_server_type_length)
self.server_type = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.server_type = self.server_type.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_version_number_offset + total_align,
const.os_version_number_length)
self.os_version = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_version = self.os_version.decode(
self.encoding or self.default_encoding)
buf = self._read_bytes(const.os_name_offset + total_align,
const.os_name_length)
buf = buf.rstrip(b'\x00 ')
if len(buf) > 0:
self.os_name = buf.decode(self.encoding or self.default_encoding)
else:
buf = self._read_bytes(const.os_maker_offset + total_align,
const.os_maker_length)
self.os_name = buf.rstrip(b'\x00 ')
if self.convert_header_text:
self.os_name = self.os_name.decode(
self.encoding or self.default_encoding)
def __next__(self):
da = self.read(nrows=self.chunksize or 1)
if da is None:
raise StopIteration
return da
# Read a single float of the given width (4 or 8).
def _read_float(self, offset, width):
if width not in (4, 8):
self.close()
raise ValueError("invalid float width")
buf = self._read_bytes(offset, width)
fd = "f" if width == 4 else "d"
return struct.unpack(self.byte_order + fd, buf)[0]
# Read a single signed integer of the given width (1, 2, 4 or 8).
def _read_int(self, offset, width):
if width not in (1, 2, 4, 8):
self.close()
raise ValueError("invalid int width")
buf = self._read_bytes(offset, width)
it = {1: "b", 2: "h", 4: "l", 8: "q"}[width]
iv = struct.unpack(self.byte_order + it, buf)[0]
return iv
def _read_bytes(self, offset, length):
if self._cached_page is None:
self._path_or_buf.seek(offset)
buf = self._path_or_buf.read(length)
if len(buf) < length:
self.close()
msg = "Unable to read {:d} bytes from file position {:d}."
raise ValueError(msg.format(length, offset))
return buf
else:
if offset + length > len(self._cached_page):
self.close()
raise ValueError("The cached page is too small.")
return self._cached_page[offset:offset + length]
def _parse_metadata(self):
done = False
while not done:
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
break
if len(self._cached_page) != self._page_length:
self.close()
raise ValueError(
"Failed to read a meta data page from the SAS file.")
done = self._process_page_meta()
def _process_page_meta(self):
self._read_page_header()
pt = [const.page_meta_type, const.page_amd_type] + const.page_mix_types
if self._current_page_type in pt:
self._process_page_metadata()
return ((self._current_page_type in [256] + const.page_mix_types) or
(self._current_page_data_subheader_pointers is not None))
def _read_page_header(self):
bit_offset = self._page_bit_offset
tx = const.page_type_offset + bit_offset
self._current_page_type = self._read_int(tx, const.page_type_length)
tx = const.block_count_offset + bit_offset
self._current_page_block_count = self._read_int(
tx, const.block_count_length)
tx = const.subheader_count_offset + bit_offset
self._current_page_subheaders_count = (
self._read_int(tx, const.subheader_count_length))
def _process_page_metadata(self):
bit_offset = self._page_bit_offset
for i in range(self._current_page_subheaders_count):
pointer = self._process_subheader_pointers(
const.subheader_pointers_offset + bit_offset, i)
if pointer.length == 0:
continue
if pointer.compression == const.truncated_subheader_id:
continue
subheader_signature = self._read_subheader_signature(
pointer.offset)
subheader_index = (
self._get_subheader_index(subheader_signature,
pointer.compression, pointer.ptype))
self._process_subheader(subheader_index, pointer)
def _get_subheader_index(self, signature, compression, ptype):
index = const.subheader_signature_to_index.get(signature)
if index is None:
f1 = ((compression == const.compressed_subheader_id) or
(compression == 0))
f2 = (ptype == const.compressed_subheader_type)
if (self.compression != "") and f1 and f2:
index = const.SASIndex.data_subheader_index
else:
self.close()
raise ValueError("Unknown subheader signature")
return index
def _process_subheader_pointers(self, offset, subheader_pointer_index):
subheader_pointer_length = self._subheader_pointer_length
total_offset = (offset +
subheader_pointer_length * subheader_pointer_index)
subheader_offset = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_length = self._read_int(total_offset, self._int_length)
total_offset += self._int_length
subheader_compression = self._read_int(total_offset, 1)
total_offset += 1
subheader_type = self._read_int(total_offset, 1)
x = _subheader_pointer()
x.offset = subheader_offset
x.length = subheader_length
x.compression = subheader_compression
x.ptype = subheader_type
return x
def _read_subheader_signature(self, offset):
subheader_signature = self._read_bytes(offset, self._int_length)
return subheader_signature
def _process_subheader(self, subheader_index, pointer):
offset = pointer.offset
length = pointer.length
if subheader_index == const.SASIndex.row_size_index:
processor = self._process_rowsize_subheader
elif subheader_index == const.SASIndex.column_size_index:
processor = self._process_columnsize_subheader
elif subheader_index == const.SASIndex.column_text_index:
processor = self._process_columntext_subheader
elif subheader_index == const.SASIndex.column_name_index:
processor = self._process_columnname_subheader
elif subheader_index == const.SASIndex.column_attributes_index:
processor = self._process_columnattributes_subheader
elif subheader_index == const.SASIndex.format_and_label_index:
processor = self._process_format_subheader
elif subheader_index == const.SASIndex.column_list_index:
processor = self._process_columnlist_subheader
elif subheader_index == const.SASIndex.subheader_counts_index:
processor = self._process_subheader_counts
elif subheader_index == const.SASIndex.data_subheader_index:
self._current_page_data_subheader_pointers.append(pointer)
return
else:
raise ValueError("unknown subheader index")
processor(offset, length)
def _process_rowsize_subheader(self, offset, length):
int_len = self._int_length
lcs_offset = offset
lcp_offset = offset
if self.U64:
lcs_offset += 682
lcp_offset += 706
else:
lcs_offset += 354
lcp_offset += 378
self.row_length = self._read_int(
offset + const.row_length_offset_multiplier * int_len, int_len)
self.row_count = self._read_int(
offset + const.row_count_offset_multiplier * int_len, int_len)
self.col_count_p1 = self._read_int(
offset + const.col_count_p1_multiplier * int_len, int_len)
self.col_count_p2 = self._read_int(
offset + const.col_count_p2_multiplier * int_len, int_len)
mx = const.row_count_on_mix_page_offset_multiplier * int_len
self._mix_page_row_count = self._read_int(offset + mx, int_len)
self._lcs = self._read_int(lcs_offset, 2)
self._lcp = self._read_int(lcp_offset, 2)
def _process_columnsize_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
self.column_count = self._read_int(offset, int_len)
if (self.col_count_p1 + self.col_count_p2 !=
self.column_count):
print("Warning: column count mismatch (%d + %d != %d)\n",
self.col_count_p1, self.col_count_p2, self.column_count)
# Unknown purpose
def _process_subheader_counts(self, offset, length):
pass
def _process_columntext_subheader(self, offset, length):
offset += self._int_length
text_block_size = self._read_int(offset, const.text_block_size_length)
buf = self._read_bytes(offset, text_block_size)
cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
cname = cname_raw
if self.convert_header_text:
cname = cname.decode(self.encoding or self.default_encoding)
self.column_names_strings.append(cname)
if len(self.column_names_strings) == 1:
compression_literal = ""
for cl in const.compression_literals:
if cl in cname_raw:
compression_literal = cl
self.compression = compression_literal
offset -= self._int_length
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
compression_literal = buf.rstrip(b"\x00")
if compression_literal == b"":
self._lcs = 0
offset1 = offset + 32
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif compression_literal == const.rle_compression:
offset1 = offset + 40
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcp)
self.creator_proc = buf[0:self._lcp]
elif self._lcs > 0:
self._lcp = 0
offset1 = offset + 16
if self.U64:
offset1 += 4
buf = self._read_bytes(offset1, self._lcs)
self.creator_proc = buf[0:self._lcp]
if self.convert_header_text:
if hasattr(self, "creator_proc"):
self.creator_proc = self.creator_proc.decode(
self.encoding or self.default_encoding)
def _process_columnname_subheader(self, offset, length):
int_len = self._int_length
offset += int_len
column_name_pointers_count = (length - 2 * int_len - 12) // 8
for i in range(column_name_pointers_count):
text_subheader = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_text_subheader_offset
col_name_offset = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_offset_offset
col_name_length = offset + const.column_name_pointer_length * \
(i + 1) + const.column_name_length_offset
idx = self._read_int(
text_subheader, const.column_name_text_subheader_length)
col_offset = self._read_int(
col_name_offset, const.column_name_offset_length)
col_len = self._read_int(
col_name_length, const.column_name_length_length)
name_str = self.column_names_strings[idx]
self.column_names.append(name_str[col_offset:col_offset + col_len])
def _process_columnattributes_subheader(self, offset, length):
int_len = self._int_length
column_attributes_vectors_count = (
length - 2 * int_len - 12) // (int_len + 8)
self.column_types = np.empty(
column_attributes_vectors_count, dtype=np.dtype('S1'))
self._column_data_lengths = np.empty(
column_attributes_vectors_count, dtype=np.int64)
self._column_data_offsets = np.empty(
column_attributes_vectors_count, dtype=np.int64)
for i in range(column_attributes_vectors_count):
col_data_offset = (offset + int_len +
const.column_data_offset_offset +
i * (int_len + 8))
col_data_len = (offset + 2 * int_len +
const.column_data_length_offset +
i * (int_len + 8))
col_types = (offset + 2 * int_len +
const.column_type_offset + i * (int_len + 8))
x = self._read_int(col_data_offset, int_len)
self._column_data_offsets[i] = x
x = self._read_int(col_data_len, const.column_data_length_length)
self._column_data_lengths[i] = x
x = self._read_int(col_types, const.column_type_length)
if x == 1:
self.column_types[i] = b'd'
else:
self.column_types[i] = b's'
def _process_columnlist_subheader(self, offset, length):
# unknown purpose
pass
def _process_format_subheader(self, offset, length):
int_len = self._int_length
text_subheader_format = (
offset +
const.column_format_text_subheader_index_offset +
3 * int_len)
col_format_offset = (offset +
const.column_format_offset_offset +
3 * int_len)
col_format_len = (offset +
const.column_format_length_offset +
3 * int_len)
text_subheader_label = (
offset +
const.column_label_text_subheader_index_offset +
3 * int_len)
col_label_offset = (offset +
const.column_label_offset_offset +
3 * int_len)
col_label_len = offset + const.column_label_length_offset + 3 * int_len
x = self._read_int(text_subheader_format,
const.column_format_text_subheader_index_length)
format_idx = min(x, len(self.column_names_strings) - 1)
format_start = self._read_int(
col_format_offset, const.column_format_offset_length)
format_len = self._read_int(
col_format_len, const.column_format_length_length)
label_idx = self._read_int(
text_subheader_label,
const.column_label_text_subheader_index_length)
label_idx = min(label_idx, len(self.column_names_strings) - 1)
label_start = self._read_int(
col_label_offset, const.column_label_offset_length)
label_len = self._read_int(col_label_len,
const.column_label_length_length)
label_names = self.column_names_strings[label_idx]
column_label = label_names[label_start: label_start + label_len]
format_names = self.column_names_strings[format_idx]
column_format = format_names[format_start: format_start + format_len]
current_column_number = len(self.columns)
col = _column()
col.col_id = current_column_number
col.name = self.column_names[current_column_number]
col.label = column_label
col.format = column_format
col.ctype = self.column_types[current_column_number]
col.length = self._column_data_lengths[current_column_number]
self.column_formats.append(column_format)
self.columns.append(col)
def read(self, nrows=None):
if (nrows is None) and (self.chunksize is not None):
nrows = self.chunksize
elif nrows is None:
nrows = self.row_count
if len(self.column_types) == 0:
self.close()
raise EmptyDataError("No columns to parse from file")
if self._current_row_in_file_index >= self.row_count:
return None
m = self.row_count - self._current_row_in_file_index
if nrows > m:
nrows = m
nd = (self.column_types == b'd').sum()
ns = (self.column_types == b's').sum()
self._string_chunk = np.empty((ns, nrows), dtype=object)
self._byte_chunk = np.empty((nd, 8 * nrows), dtype=np.uint8)
self._current_row_in_chunk_index = 0
p = Parser(self)
p.read(nrows)
rslt = self._chunk_to_dataframe()
if self.index is not None:
rslt = rslt.set_index(self.index)
return rslt
def _read_next_page(self):
self._current_page_data_subheader_pointers = []
self._cached_page = self._path_or_buf.read(self._page_length)
if len(self._cached_page) <= 0:
return True
elif len(self._cached_page) != self._page_length:
self.close()
msg = ("failed to read complete page from file "
"(read {:d} of {:d} bytes)")
raise ValueError(msg.format(len(self._cached_page),
self._page_length))
self._read_page_header()
if self._current_page_type == const.page_meta_type:
self._process_page_metadata()
pt = [const.page_meta_type, const.page_data_type]
pt += const.page_mix_types
if self._current_page_type not in pt:
return self._read_next_page()
return False
def _chunk_to_dataframe(self):
n = self._current_row_in_chunk_index
m = self._current_row_in_file_index
ix = range(m - n, m)
rslt = pd.DataFrame(index=ix)
js, jb = 0, 0
for j in range(self.column_count):
name = self.column_names[j]
if self.column_types[j] == b'd':
rslt[name] = self._byte_chunk[jb, :].view(
dtype=self.byte_order + 'd')
rslt[name] = np.asarray(rslt[name], dtype=np.float64)
if self.convert_dates:
unit = None
if self.column_formats[j] in const.sas_date_formats:
unit = 'd'
elif self.column_formats[j] in const.sas_datetime_formats:
unit = 's'
if unit:
rslt[name] = pd.to_datetime(rslt[name], unit=unit,
origin="1960-01-01")
jb += 1
elif self.column_types[j] == b's':
rslt[name] = self._string_chunk[js, :]
if self.convert_text and (self.encoding is not None):
rslt[name] = rslt[name].str.decode(
self.encoding or self.default_encoding)
if self.blank_missing:
ii = rslt[name].str.len() == 0
rslt.loc[ii, name] = np.nan
js += 1
else:
self.close()
raise ValueError("unknown column type %s" %
self.column_types[j])
return rslt
| mit |
yukke42/machine-learning | 2/p29_fitting.py | 1 | 1360 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Perceptron(object):
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""
Parameters
----------
X : array-like, shape = [n_samples, n_features]
y : array-like, shape = [n_samples]
Returns
-------
self : object
"""
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:100, [0,2]].values
ppn = Perceptron(eta=0.01, n_iter=10)
ppn.fit(X, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassificaton')
plt.show()
| mit |
aasensio/elecciones2016 | sondeos.py | 2 | 6818 | # -*- coding: utf-8 -*-
import numpy as np
import xlrd
import numbers
import matplotlib.pyplot as pl
import seaborn as sn
import datetime as dt
import matplotlib.dates as mdates
import scipy.integrate as integ
import scipy.interpolate as interp
from ipdb import set_trace as stop
import nestle
import pyiacsun as ps
from numba import jit
import emcee
import transforms as tr
def euler( f, x0, t ):
"""Euler's method to solve x' = f(x,t) with x(t[0]) = x0.
USAGE:
x = euler(f, x0, t)
INPUT:
f - function of x and t equal to dx/dt. x may be multivalued,
in which case it should a list or a NumPy array. In this
case f must return a NumPy array with the same dimension
as x.
x0 - the initial condition(s). Specifies the value of x when
t = t[0]. Can be either a scalar or a list or NumPy array
if a system of equations is being solved.
t - list or NumPy array of t values to compute solution at.
t[0] is the the initial condition point, and the difference
h=t[i+1]-t[i] determines the step size h.
OUTPUT:
x - NumPy array containing solution values corresponding to each
entry in t array. If a system is being solved, x will be
an array of arrays.
"""
n = len( t )
x = np.array( [x0] * n )
for i in range( n - 1 ):
x[i+1] = x[i] + ( t[i+1] - t[i] ) * f( x[i], t[i] )
return x
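# Hypothetical worked example (added for illustration, not part of the original
# script): integrating dx/dt = -x from x(0) = 1 with the Euler stepper above;
# the exact solution is exp(-t), so the numerical value should be close for a
# small step size.
#
#   >>> t_demo = np.linspace(0.0, 1.0, 101)
#   >>> x_demo = euler(lambda x, t: -x, 1.0, t_demo)
#   >>> abs(x_demo[-1] - np.exp(-1.0)) < 0.01
#   True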
class sondeos(object):
def __init__(self, nNodes):
book = xlrd.open_workbook("sondeos.xlsx")
sh = book.sheet_by_index(0)
PP = []
PSOE = []
IU = []
UPyD = []
Podemos = []
Ciudadanos = []
fecha = []
mesEsp = ['ene', 'feb', 'mar', 'abr', 'may', 'jun', 'jul', 'ago', 'sep', 'oct', 'nov', 'dic']
mesEng = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
for rx in range(2,sh.nrows):
row = sh.row_values(rx)
res = row[2]
out = bytes(res, 'utf-8')
res = out.replace(b'\xe2\x80\x93', b'-').decode('utf-8').lower()
for i in range(12):
res = res.replace(mesEsp[i], mesEng[i])
res = res.replace('.', '')
guion = res.find('-')
if (guion != -1):
res = res[guion+1:]
out = res.split(' ')
if ((len(out) > 1) and (res.find(')') == -1) ):
if (len(out) == 2):
res = '1 '+res
PP.append(row[3])
PSOE.append(row[4])
IU.append(row[5])
UPyD.append(row[6])
Podemos.append(row[16])
Ciudadanos.append(row[17])
fecha.append(dt.datetime.strptime(res, "%d %b %Y").date())
partidos = [PP, PSOE, IU, UPyD, Podemos, Ciudadanos]
nSondeos = len(PP)
# Now clean the lists to transform percentages to numbers
for partido in partidos:
for i in range(nSondeos):
if (not isinstance(partido[i], numbers.Number)):
findPct = partido[i].find('%')
if (findPct != -1):
res = 0.01*float(partido[i][0:findPct].replace(',', '.'))
partido[i] = res
else:
partido[i] = 0
PP = np.asarray(PP)[::-1]
PSOE = np.asarray(PSOE)[::-1]
IU = np.asarray(IU)[::-1]
UPyD = np.asarray(UPyD)[::-1]
Podemos = np.asarray(Podemos)[::-1]
Ciudadanos = np.asarray(Ciudadanos)[::-1]
fecha = np.asarray(fecha)[::-1]
delta = np.zeros(len(fecha))
for i in range(len(fecha)):
delta[i] = (fecha[i]-fecha[0]).days
partidos = [PP, PSOE, IU, UPyD, Podemos, Ciudadanos]
colors = ["blue", "red", "green", "magenta", "violet", "orange"]
self.obsTime = delta
self.party = np.asarray(partidos).T
self.nParties = self.party.shape[1]
self.nNodes = nNodes
self.timeNodes = np.linspace(delta[0], delta[-1], self.nNodes)
self.initialCondition = np.zeros(self.nParties)
for i in range(self.nParties):
self.initialCondition[i] = self.party[0,i]
ind = np.all(self.party != 0, axis=1)
self.party = self.party[ind,:]
self.obsTime = self.obsTime[ind]
self.nTimes = len(self.obsTime)
ind = np.argsort(self.obsTime)
self.obsTime = self.obsTime[ind]
for i in range(6):
self.party[:,i] = self.party[ind,i]
self.obsTime -= self.obsTime[0]
self.low = -1
self.up = 1
#@profile
def funODE(self, y, t0):
ind = np.where()
return matrix.dot(y)
#@profile
def logLikelihood(self, thetaIn):
theta, lnJactheta = tr.lrBoundedForward(thetaIn, self.low, self.up)
matrix = theta.reshape((self.nNodes,self.nParties,self.nParties))
self.matrix = np.zeros((self.nTimes,self.nParties,self.nParties))
for i in range(self.nParties):
for j in range(self.nParties):
tmp = interp.UnivariateSpline(self.timeNodes, matrix[:,i,j])
self.matrix[:,i,j] = tmp(self.obsTime)
out = np.zeros((self.nTimes,self.nParties))
out[0,:] = self.initialCondition
for i in range(self.nTimes-1):
out[i+1,:] = out[i,:] + ( self.obsTime[i+1] - self.obsTime[i] ) * self.matrix[i,:,:].dot(out[i,:])
logL = np.sum( (out[1:,:] - self.party[1:,:])**2 / 0.1**2 )
#print(logL)
return logL + np.sum(lnJactheta)
def prior_transform(self, x):
return (self.up - self.low) * x + self.low
def sample(self):
res = nestle.sample(self.logLikelihood, self.prior_transform, self.nNodes*36, method='single', npoints=1000)
def sample_emcee(self, nIterations):
ndim = self.nNodes*36
self.nwalkers = 2 * ndim
self.theta0 = np.zeros(ndim)
p0 = [self.theta0 + 1e-4*np.random.randn(ndim) for i in range(self.nwalkers)]
self.sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self.logLikelihood)
loop = 0
self.chain = np.zeros((nIterations,self.nwalkers,ndim))
for result in self.sampler.sample(p0, iterations=nIterations, storechain=False):
out, _ = tr.lrBoundedForward(result[0], self.low, self.up)
self.chain[loop,:,:] = out
ps.util.progressbar(loop, nIterations, text='sampling')
loop += 1
out = sondeos(4)
out.sample_emcee(1000)
np.save('sampling.mcmc', out.chain)
| mit |
zertan/Menace | menace/bin/addStrainCoverage.py | 2 | 3094 | #!/usr/bin/env python
import numpy as np
import pandas
import xmltodict
import os
import sys
#import time
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i+n]
def binData(x,binSize):
# use an integer bin count (np.zeros needs an integer shape) and guard
# against integer division
l=int(np.ceil(len(x)/float(binSize)))
out=np.zeros(l+1)
tmp=chunks(x,binSize)
for i,val in enumerate(tmp):
out[i]=np.sum(val)
return out
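# Illustrative example (not part of the pipeline): binData sums the input in
# fixed-size chunks and leaves a trailing zero bin, e.g.
# binData(np.array([1, 2, 3, 4, 5, 6]), 2) -> array([ 3., 7., 11., 0.])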
def interp(data,length):
calls=0
while len(data)!=length:
calls+=1
length1=len(data)
cov=np.sum(data)/float(length1)
diff=length-length1
binSize=int(np.ceil(length1/float(diff)))
if binSize==0 or diff>length1/float(2.3):
break
chunks1=chunks(data,int(binSize))
#data2=data
#print("nr " + str(diff))
vals=[]
insert_ind=[]
for i,val in enumerate(chunks1):
binSize2=min(len(val),binSize)
insert_ind.append(i*binSize+np.round(binSize2/float(2)))
vals.append(np.mean(val))
vals=np.array(vals)
insert_ind=np.array(insert_ind)
#print(str(data.shape))
#print(str(insert_ind.shape))
#print(str(vals.shape))
#print(np.array_str(vals))
data=np.insert(data,insert_ind,vals)
#print(str(data.shape))
cov2=np.sum(data)/length
data=cov/cov2*data
#print("k")
if calls>=10:
return []
#print(str(calls))
return data
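# interp stretches a coverage vector to the target length by repeatedly
# inserting local bin means and rescaling so the mean coverage is preserved;
# it breaks out early if the gap is too large and returns [] after 10
# unsuccessful passes. Illustrative example (not part of the pipeline):
# interp(np.ones(90), 100) # -> vector of length 100 with mean ~1.0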
originFrame=pandas.read_csv(os.path.join(sys.argv[1],'extra','origins.txt'),delimiter=" ",index_col=0)
#print(originFrame.to_string())
#originFrame=originFrame.transpose()
#print(originFrame.to_string())
#print(sys.argv[3])
#print(originFrame.index.values)
#print(originFrame.loc[sys.argv[2]]['Origin'])
#for i,arg in enumerate(sys.argv[2:]):
# print(originFrame.loc[:arg])
data=[]
origin=[]
genomeLen=[]
bacteriaName=[]
ind=0
for i,arg in enumerate(sys.argv[3:]):
#print(arg+".depth")
try:
tmpData=np.array(pandas.read_csv(arg+".depth",delimiter=" ")).astype('float')
data.append(tmpData)
#ind=ind+1
with open(os.path.join(sys.argv[2],'Headers',arg+".xml")) as fd:
obj = xmltodict.parse(fd.read())
#genomeLen.append(int(obj['DocSum']['Item'][8]['#text']))
bacteriaName.append(obj['DocSum']['Item'][1]['#text'])
genomeLen.append(int(len(tmpData)))
origin.append(int(originFrame.loc[arg]['Origin']))
data[ind]=np.roll(data[ind],-origin[ind])
ind=ind+1
except IOError as e:
print(arg+" depth not found.")
except ValueError as e:
print(arg+" origin not found.")
if not data or len(data)==1:
sys.exit()
#print("Len data: "+str(len(data)))
#ind=np.where(np.max(genomeLen))
#print(str(ind))
#print(np.array_str(ind))
#print(str(genomeLen))
ind=genomeLen.index(max(genomeLen))
genomeLen=max(genomeLen)#[int(ind[0])]
#print(str(genomeLen))
print("ind "+str(ind))# + "len "+str(len(genomeLen)))
#print(str(len(data[ind])))
dataLen=len(data[ind])
data2=np.array(data[ind]).flatten()
del data[ind]
#print("init shape")
bap=data2.shape
#print(str(genomeLen))
for i,val in enumerate(data):
tmp=interp(val,dataLen)
print(sys.argv[3+i]+" tmp shape: "+ str(tmp.shape) + " init shape: "+str(bap))
if tmp.shape==data2.shape:
data2=np.add(data2,tmp)
print(sys.argv[3])
np.save(sys.argv[3], data2)
| gpl-2.0 |
nudles/incubator-singa | examples/gan/vanilla.py | 5 | 8295 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from singa import device
from singa import initializer
from singa import layer
from singa import loss
from singa import net as ffnet
from singa import optimizer
from singa import tensor
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from utils import load_data
from utils import print_log
class VANILLA():
def __init__(self, dev, rows=28, cols=28, channels=1, noise_size=100, hidden_size=128, batch=128,
interval=1000, learning_rate=0.001, epochs=1000000, dataset_filepath='mnist.pkl.gz', file_dir='vanilla_images/'):
self.dev = dev
self.rows = rows
self.cols = cols
self.channels = channels
self.feature_size = self.rows * self.cols * self.channels
self.noise_size = noise_size
self.hidden_size = hidden_size
self.batch = batch
self.batch_size = self.batch//2
self.interval = interval
self.learning_rate = learning_rate
self.epochs = epochs
self.dataset_filepath = dataset_filepath
self.file_dir = file_dir
self.g_w0_specs = {'init': 'xavier',}
self.g_b0_specs = {'init': 'constant', 'value': 0,}
self.g_w1_specs = {'init': 'xavier',}
self.g_b1_specs = {'init': 'constant', 'value': 0,}
self.gen_net = ffnet.FeedForwardNet(loss.SigmoidCrossEntropy(),)
self.gen_net_fc_0 = layer.Dense(name='g_fc_0', num_output=self.hidden_size, use_bias=True,
W_specs=self.g_w0_specs, b_specs=self.g_b0_specs, input_sample_shape=(self.noise_size,))
self.gen_net_relu_0 = layer.Activation(name='g_relu_0', mode='relu',input_sample_shape=(self.hidden_size,))
self.gen_net_fc_1 = layer.Dense(name='g_fc_1', num_output=self.feature_size, use_bias=True,
W_specs=self.g_w1_specs, b_specs=self.g_b1_specs, input_sample_shape=(self.hidden_size,))
self.gen_net_sigmoid_1 = layer.Activation(name='g_relu_1', mode='sigmoid', input_sample_shape=(self.feature_size,))
self.gen_net.add(self.gen_net_fc_0)
self.gen_net.add(self.gen_net_relu_0)
self.gen_net.add(self.gen_net_fc_1)
self.gen_net.add(self.gen_net_sigmoid_1)
for (p, specs) in zip(self.gen_net.param_values(), self.gen_net.param_specs()):
filler = specs.filler
if filler.type == 'gaussian':
p.gaussian(filler.mean, filler.std)
elif filler.type == 'xavier':
initializer.xavier(p)
else:
p.set_value(0)
print(specs.name, filler.type, p.l1())
self.gen_net.to_device(self.dev)
self.d_w0_specs = {'init': 'xavier',}
self.d_b0_specs = {'init': 'constant', 'value': 0,}
self.d_w1_specs = {'init': 'xavier',}
self.d_b1_specs = {'init': 'constant', 'value': 0,}
self.dis_net = ffnet.FeedForwardNet(loss.SigmoidCrossEntropy(),)
self.dis_net_fc_0 = layer.Dense(name='d_fc_0', num_output=self.hidden_size, use_bias=True,
W_specs=self.d_w0_specs, b_specs=self.d_b0_specs, input_sample_shape=(self.feature_size,))
self.dis_net_relu_0 = layer.Activation(name='d_relu_0', mode='relu',input_sample_shape=(self.hidden_size,))
self.dis_net_fc_1 = layer.Dense(name='d_fc_1', num_output=1, use_bias=True,
W_specs=self.d_w1_specs, b_specs=self.d_b1_specs, input_sample_shape=(self.hidden_size,))
self.dis_net.add(self.dis_net_fc_0)
self.dis_net.add(self.dis_net_relu_0)
self.dis_net.add(self.dis_net_fc_1)
for (p, specs) in zip(self.dis_net.param_values(), self.dis_net.param_specs()):
filler = specs.filler
if filler.type == 'gaussian':
p.gaussian(filler.mean, filler.std)
elif filler.type == 'xavier':
initializer.xavier(p)
else:
p.set_value(0)
print(specs.name, filler.type, p.l1())
self.dis_net.to_device(self.dev)
self.combined_net = ffnet.FeedForwardNet(loss.SigmoidCrossEntropy(), )
for l in self.gen_net.layers:
self.combined_net.add(l)
for l in self.dis_net.layers:
self.combined_net.add(l)
self.combined_net.to_device(self.dev)
def train(self):
train_data, _, _, _, _, _ = load_data(self.dataset_filepath)
opt_0 = optimizer.Adam(lr=self.learning_rate) # optimizer for discriminator
opt_1 = optimizer.Adam(lr=self.learning_rate) # optimizer for generator, aka the combined model
for (p, specs) in zip(self.dis_net.param_names(), self.dis_net.param_specs()):
opt_0.register(p, specs)
for (p, specs) in zip(self.gen_net.param_names(), self.gen_net.param_specs()):
opt_1.register(p, specs)
for epoch in range(self.epochs):
idx = np.random.randint(0, train_data.shape[0], self.batch_size)
real_imgs = train_data[idx]
real_imgs = tensor.from_numpy(real_imgs)
real_imgs.to_device(self.dev)
noise = tensor.Tensor((self.batch_size, self.noise_size))
noise.uniform(-1, 1)
noise.to_device(self.dev)
fake_imgs = self.gen_net.forward(flag=False, x=noise)
real_labels = tensor.Tensor((self.batch_size, 1))
fake_labels = tensor.Tensor((self.batch_size, 1))
real_labels.set_value(1.0)
fake_labels.set_value(0.0)
real_labels.to_device(self.dev)
fake_labels.to_device(self.dev)
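# Standard GAN alternation: the discriminator is first updated on a real
# batch and on a generated (fake) batch, then the generator is updated
# through the combined net against "real" labels so it learns to fool the
# discriminator.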
grads, (d_loss_real, _) = self.dis_net.train(real_imgs, real_labels)
for (s, p ,g) in zip(self.dis_net.param_names(), self.dis_net.param_values(), grads):
opt_0.apply_with_lr(epoch, self.learning_rate, g, p, str(s), epoch)
grads, (d_loss_fake, _) = self.dis_net.train(fake_imgs, fake_labels)
for (s, p ,g) in zip(self.dis_net.param_names(), self.dis_net.param_values(), grads):
opt_0.apply_with_lr(epoch, self.learning_rate, g, p, str(s), epoch)
d_loss = d_loss_real + d_loss_fake
noise = tensor.Tensor((self.batch_size, self.noise_size))
noise.uniform(-1,1)
noise.to_device(self.dev)
real_labels = tensor.Tensor((self.batch_size, 1))
real_labels.set_value(1.0)
real_labels.to_device(self.dev)
grads, (g_loss, _) = self.combined_net.train(noise, real_labels)
for (s, p ,g) in zip(self.gen_net.param_names(), self.gen_net.param_values(), grads):
opt_1.apply_with_lr(epoch, self.learning_rate, g, p, str(s), epoch)
if epoch % self.interval == 0:
self.save_image(epoch)
print_log('The {} epoch, G_LOSS: {}, D_LOSS: {}'.format(epoch, g_loss, d_loss))
def save_image(self, epoch):
rows = 5
cols = 5
channels = self.channels
noise = tensor.Tensor((rows*cols*channels, self.noise_size))
noise.uniform(-1, 1)
noise.to_device(self.dev)
gen_imgs = self.gen_net.forward(flag=False, x=noise)
gen_imgs = tensor.to_numpy(gen_imgs)
show_imgs = np.reshape(gen_imgs, (gen_imgs.shape[0], self.rows, self.cols, self.channels))
fig, axs = plt.subplots(rows, cols)
cnt = 0
for r in range(rows):
for c in range(cols):
axs[r,c].imshow(show_imgs[cnt, :, :, 0], cmap='gray')
axs[r,c].axis('off')
cnt += 1
fig.savefig("{}{}.png".format(self.file_dir, epoch))
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train GAN over MNIST')
parser.add_argument('filepath', type=str, help='the dataset path')
parser.add_argument('--use_gpu', action='store_true')
args = parser.parse_args()
if args.use_gpu:
print('Using GPU')
dev = device.create_cuda_gpu()
layer.engine = 'cudnn'
else:
print('Using CPU')
dev = device.get_default_device()
layer.engine = 'singacpp'
if not os.path.exists('vanilla_images/'):
os.makedirs('vanilla_images/')
rows = 28
cols = 28
channels = 1
noise_size = 100
hidden_size = 128
batch = 128
interval = 1000
learning_rate = 0.001
epochs = 1000000
dataset_filepath = 'mnist.pkl.gz'
file_dir = 'vanilla_images/'
vanilla = VANILLA(dev, rows, cols, channels, noise_size, hidden_size, batch,
interval, learning_rate, epochs, dataset_filepath, file_dir)
vanilla.train() | apache-2.0 |
sppalkia/weld | python/grizzly/grizzly/grizzly_impl.py | 2 | 48957 | """Contains implementations for each ported operation in Pandas.
Attributes:
decoder_ (NumPyEncoder): Description
encoder_ (NumPyDecoder): Description
"""
from encoders import *
from weld.weldobject import *
encoder_ = NumPyEncoder()
decoder_ = NumPyDecoder()
def get_field(expr, field):
""" Fetch a field from a struct expr
"""
weld_obj = WeldObject(encoder_, decoder_)
struct_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
struct_var = expr.obj_id
weld_obj.dependencies[struct_var] = expr
weld_template = """
%(struct)s.$%(field)s
"""
weld_obj.weld_code = weld_template % {"struct":struct_var,
"field":field}
return weld_obj
def unique(array, ty):
"""
Returns a new array with all duplicate elements removed.
Args:
array (WeldObject / Numpy.ndarray): Input array
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template = """
map(
tovec(
result(
for(
map(
%(array)s,
|p: %(ty)s| {p,0}
),
dictmerger[%(ty)s,i32,+],
|b, i, e| merge(b,e)
)
)
),
|p: {%(ty)s, i32}| p.$0
)
"""
weld_obj.weld_code = weld_template % {"array": array_var, "ty": ty}
return weld_obj
def aggr(array, op, initial_value, ty):
"""
Returns sum of elements in the array.
Args:
array (WeldObject / Numpy.ndarray): Input array
op (str): Op string used to aggregate the array (+ / *)
initial_value (int): Initial value for aggregation
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template = """
result(
for(
%(array)s,
merger[%(ty)s,%(op)s],
|b, i, e| merge(b, e)
)
)
"""
weld_obj.weld_code = weld_template % {
"array": array_var, "ty": ty, "op": op}
return weld_obj
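# Illustrative usage sketch (not executed here): summing a vector of f64
# values. The `ty` argument is spliced into the template, so any WeldType
# whose str() is "f64" (e.g. WeldDouble from weld.types, assumed here) fits:
# sum_obj = aggr(np.array([1.0, 2.0, 3.0]), "+", 0, WeldDouble())
# # generated Weld: result(for(v, merger[f64,+], |b,i,e| merge(b,e))) -> 6.0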
def mask(array, predicates, new_value, ty):
"""
Returns a new array, with each element in the original array satisfying the
passed-in predicate set to `new_value`
Args:
array (WeldObject / Numpy.ndarray): Input array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
new_value (WeldObject / Numpy.ndarray / str): mask value
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
predicates_var = weld_obj.update(predicates)
if isinstance(predicates, WeldObject):
predicates_var = predicates.obj_id
weld_obj.dependencies[predicates_var] = predicates
if str(ty).startswith("vec"):
new_value_var = weld_obj.update(new_value)
if isinstance(new_value, WeldObject):
new_value_var = new_value.obj_id
weld_obj.dependencies[new_value_var] = new_value
else:
new_value_var = "%s(%s)" % (ty, str(new_value))
weld_template = """
map(
zip(%(array)s, %(predicates)s),
|p: {%(ty)s, bool}| if (p.$1, %(new_value)s, p.$0)
)
"""
weld_obj.weld_code = weld_template % {
"array": array_var,
"predicates": predicates_var,
"new_value": new_value_var,
"ty": ty}
return weld_obj
def filter(array, predicates, ty=None):
"""
Returns a new array containing only the elements of the original array
whose corresponding entry in `predicates` is true.
Args:
array (WeldObject / Numpy.ndarray): Input array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
predicates_var = weld_obj.update(predicates)
if isinstance(predicates, WeldObject):
predicates_var = predicates.obj_id
weld_obj.dependencies[predicates_var] = predicates
weld_template = """
result(
for(
zip(%(array)s, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
)
"""
weld_obj.weld_code = weld_template % {
"array": array_var,
"predicates": predicates_var}
return weld_obj
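# Illustrative semantics (hypothetical inputs): filter is the lazy analogue
# of boolean masking, e.g. for array [1, 2, 3] and predicates
# [True, False, True] the generated Weld program yields [1, 3].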
def pivot_filter(pivot_array, predicates, ty=None):
"""
Filters the rows of a pivot table, keeping only the index entries and
column values whose corresponding entry in `predicates` is true.
Args:
array (WeldObject / Numpy.ndarray): Input array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
pivot_array_var = weld_obj.update(pivot_array)
if isinstance(pivot_array, WeldObject):
pivot_array_var = pivot_array.obj_id
weld_obj.dependencies[pivot_array_var] = pivot_array
predicates_var = weld_obj.update(predicates)
if isinstance(predicates, WeldObject):
predicates_var = predicates.obj_id
weld_obj.dependencies[predicates_var] = predicates
weld_template = """
let index_filtered =
result(
for(
zip(%(array)s.$0, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
);
let pivot_filtered =
map(
%(array)s.$1,
|x|
result(
for(
zip(x, %(predicates)s),
appender,
|b, i, e| if (e.$1, merge(b, e.$0), b)
)
)
);
{index_filtered, pivot_filtered, %(array)s.$2}
"""
weld_obj.weld_code = weld_template % {
"array": pivot_array_var,
"predicates": predicates_var}
return weld_obj
def isin(array, predicates, ty):
""" Checks if elements in array is also in predicates
# TODO: better parameter naming
Args:
array (WeldObject / Numpy.ndarray): Input array
predicates (WeldObject / Numpy.ndarray<bool>): Predicate set
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
predicates_var = weld_obj.update(predicates)
if isinstance(predicates, WeldObject):
predicates_var = predicates.obj_id
weld_obj.dependencies[predicates_var] = predicates
weld_template = """
let check_dict =
result(
for(
map(
%(predicate)s,
|p: %(ty)s| {p,0}
),
dictmerger[%(ty)s,i32,+],
|b, i, e| merge(b,e)
)
);
map(
%(array)s,
|x: %(ty)s| keyexists(check_dict, x)
)
"""
weld_obj.weld_code = weld_template % {
"array": array_var,
"predicate": predicates_var,
"ty": ty}
return weld_obj
def element_wise_op(array, other, op, ty):
"""
Applies a binary operation between series and other, element-wise.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second Input array
op (str): Op string used to compute element-wise operation (+ / *)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
other_var = other.obj_id
weld_obj.dependencies[other_var] = other
weld_template = """
map(
zip(%(array)s, %(other)s),
|a| a.$0 %(op)s a.$1
)
"""
weld_obj.weld_code = weld_template % {"array": array_var,
"other": other_var,
"ty": ty, "op": op}
return weld_obj
def unzip_columns(expr, column_types):
"""
Unzip a vector of structs into separate column vectors.
Args:
expr (WeldObject / Numpy.ndarray): vector of structs to unzip
column_types (List<WeldType>): type of each output column
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
column_appenders = []
struct_fields = []
result_fields = []
for i, column_type in enumerate(column_types):
column_appenders.append("appender[%s]" % column_type)
struct_fields.append("merge(b.$%s, e.$%s)" % (i, i))
result_fields.append("result(unzip_builder.$%s)" % i)
appender_string = "{%s}" % ", ".join(column_appenders)
struct_string = "{%s}" % ", ".join(struct_fields)
result_string = "{%s}" % ", ".join(result_fields)
expr_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
expr_var = expr.obj_id
weld_obj.dependencies[expr_var] = expr
weld_template = """
let unzip_builder = for(
%(expr)s,
%(appenders)s,
|b,i,e| %(struct_builder)s
);
%(result)s
"""
weld_obj.weld_code = weld_template % {"expr": expr_var,
"appenders": appender_string,
"struct_builder": struct_string,
"result": result_string}
return weld_obj
def sort(expr, field = None, keytype=None, ascending=True):
"""
Sorts the vector.
If the field parameter is provided then the sort
operators on a vector of structs where the sort key
is the field of the struct.
Args:
expr (WeldObject)
field (Int)
"""
weld_obj = WeldObject(encoder_, decoder_)
expr_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
expr_var = expr.obj_id
weld_obj.dependencies[expr_var] = expr
if field is not None:
key_str = "x.$%s" % field
else:
key_str = "x"
if not ascending:
# The type is not necessarily f64.
key_str = key_str + "* %s(-1)" % keytype
weld_template = """
sort(%(expr)s, |x| %(key)s)
"""
weld_obj.weld_code = weld_template % {"expr":expr_var, "key":key_str}
return weld_obj
def slice_vec(expr, start, stop):
"""
Slices the vector.
Args:
expr (WeldObject)
start (Long)
stop (Long)
"""
weld_obj = WeldObject(encoder_, decoder_)
expr_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
expr_var = expr.obj_id
weld_obj.dependencies[expr_var] = expr
weld_template = """
slice(%(expr)s, %(start)sL, %(stop)sL)
"""
weld_obj.weld_code = weld_template % {"expr":expr_var,
"start":start,
"stop":stop}
return weld_obj
def zip_columns(columns):
"""
Zip together multiple columns.
Args:
columns (List<WeldObject / Numpy.ndarray>): list of columns
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
column_vars = []
for column in columns:
col_var = weld_obj.update(column)
if isinstance(column, WeldObject):
col_var = column.obj_id
weld_obj.dependencies[col_var] = column
column_vars.append(col_var)
arrays = ", ".join(column_vars)
weld_template = """
result(
for(
zip(%(array)s),
appender,
|b, i, e| merge(b, e)
)
)
"""
weld_obj.weld_code = weld_template % {
"array": arrays,
}
return weld_obj
def compare(array, other, op, ty_str):
"""
Performs passed-in comparison op between every element in the passed-in
array and other, and returns an array of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
other (WeldObject / Numpy.ndarray): Second input array
op (str): Op string used for element-wise comparison (== >= <= !=)
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
# Strings need to be encoded into vec[char] array.
# Constants can be added directly to the Weld snippet.
if isinstance(other, str) or isinstance(other, WeldObject):
other_var = weld_obj.update(other)
if isinstance(other, WeldObject):
other_var = other.obj_id
weld_obj.dependencies[other_var] = other
else:
other_var = "%s(%s)" % (ty_str, str(other))
weld_template = """
map(
%(array)s,
|a: %(ty)s| a %(op)s %(other)s
)
"""
weld_obj.weld_code = weld_template % {"array": array_var,
"other": other_var,
"op": op, "ty": ty_str}
return weld_obj
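# Illustrative semantics (hypothetical inputs): compare builds an
# element-wise predicate vector, e.g. compare(data, 2, "==", "i64") produces
# Weld code equivalent to numpy's (data == 2), a vector of booleans.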
def slice(array, start, size, ty):
"""
Returns a new array-of-arrays with each array truncated, starting at
index `start` for `size` elements.
Args:
array (WeldObject / Numpy.ndarray): Input array
start (int): starting index
size (int): length to truncate at
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template = """
map(
%(array)s,
|array: %(ty)s| slice(array, %(start)dL, %(size)dL)
)
"""
weld_obj.weld_code = weld_template % {"array": array_var, "start": start,
"ty": ty, "size": size}
return weld_obj
def to_lower(array, ty):
"""
Returns a new array of strings that are converted to lowercase
Args:
array (WeldObject / Numpy.ndarray): Input array
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template = """
map(
%(array)s,
|array: vec[i8]| map(array, |b:i8| if(b >= i8(65) && b <= i8(90), b + i8(32), b))
)
"""
weld_obj.weld_code = weld_template % {"array": array_var, "ty": ty}
return weld_obj
def contains(array, ty, string):
"""
Checks if given string is contained in each string in the array.
Output is a vec of booleans.
Args:
array (WeldObject / Numpy.ndarray): Input array
ty (WeldType): Type of each element in the input array
string (str / WeldObject): substring to search for
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
string_obj = weld_obj.update(string)
if isinstance(string, WeldObject):
string_obj = string.obj_id
weld_obj.dependencies[string_obj] = string
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
(start, end) = 0, len(string)
# Some odd bug where iterating on str and slicing str results
# in a segfault
weld_template = """
map(
%(array)s,
|str: vec[%(ty)s]|
let tstr = str;
result(
for(
str,
merger[i8,+](i8(0)),
|b,i,e|
if(slice(tstr, i, i64(%(end)s)) == %(cmpstr)s,
merge(b, i8(1)),
b
)
)
) > i8(0)
)
"""
weld_obj.weld_code = weld_template % {"array": array_var, "ty": ty,
"start": start, "end": end,
"cmpstr": string_obj}
return weld_obj
def count(array, ty):
"""
Return number of non-NA/null observations in the Series
TODO : Filter out NaN's once Weld supports NaN's
Args:
array (WeldObject / Numpy.ndarray): Input array
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template = """
len(%(array)s)
"""
weld_obj.weld_code = weld_template % {"array": array_var}
return weld_obj
def join(expr1, expr2, d1_keys, d2_keys, keys_type, d1_vals, df1_vals_ty, d2_vals, df2_vals_ty):
"""
Computes a join on two tables
"""
weld_obj = WeldObject(encoder_, decoder_)
df1_var = weld_obj.update(expr1)
if isinstance(expr1, WeldObject):
df1_var = expr1.obj_id
weld_obj.dependencies[df1_var] = expr1
df2_var = weld_obj.update(expr2)
if isinstance(expr2, WeldObject):
df2_var = expr2.obj_id
weld_obj.dependencies[df2_var] = expr2
d1_key_fields = ", ".join(["e.$%s" % k for k in d1_keys])
if len(d1_keys) > 1:
d1_key_struct = "{%s}" % (d1_key_fields)
else:
d1_key_struct = d1_key_fields
d1_val_fields = ", ".join(["e.$%s" % k for k in d1_vals])
d2_key_fields = ", ".join(["e.$%s" % k for k in d2_keys])
if len(d2_keys) > 1:
d2_key_struct = "{%s}" % (d2_key_fields)
else:
d2_key_struct = d2_key_fields
d2_val_fields = ", ".join(["e.$%s" % k for k in d2_vals])
d2_val_fields2 = ", ".join(["e2.$%s" % i for i, k in enumerate(d2_vals)])
d2_val_struct = "{%s}" % (d2_val_fields)
weld_template = """
let df2_join_table = result(
for(
%(df2)s,
groupmerger[%(kty)s, %(df2ty)s],
|b, i, e| merge(b, {%(df2key)s, %(df2vals)s})
)
);
result(for(
%(df1)s,
appender,
|b, i, e|
for(
lookup(df2_join_table, %(df1key)s),
b,
|b2, i2, e2| merge(b, {%(df1key)s, %(df1vals)s, %(df2vals2)s})
)
))
"""
weld_obj.weld_code = weld_template % {"df1":df1_var,
"df1key":d1_key_struct,
"df1vals": d1_val_fields,
"df2":df2_var,
"kty":keys_type,
"df2ty":df2_vals_ty,
"df2key":d2_key_struct,
"df2vals":d2_val_struct,
"df2vals2":d2_val_fields2}
return weld_obj
def pivot_table(expr, value_index, value_ty, index_index, index_ty, columns_index, columns_ty, aggfunc):
"""
Constructs a pivot table where the index_index and columns_index are used as keys and the value_index is used as the value which is aggregated.
"""
weld_obj = WeldObject(encoder_, decoder_)
zip_var = weld_obj.update(expr)
if isinstance(expr, WeldObject):
zip_var = expr.obj_id
weld_obj.dependencies[zip_var] = expr
if aggfunc == 'sum':
weld_template = """
let bs = for(
%(zip_expr)s,
{dictmerger[{%(ity)s,%(cty)s},%(vty)s,+], dictmerger[%(ity)s,i64,+], dictmerger[%(cty)s,i64,+]},
|b, i, e| {merge(b.$0, {{e.$%(id)s, e.$%(cd)s}, e.$%(vd)s}),
merge(b.$1, {e.$%(id)s, 1L}),
merge(b.$2, {e.$%(cd)s, 1L})}
);
let agg_dict = result(bs.$0);
let ind_vec = sort(map(tovec(result(bs.$1)), |x| x.$0), |x, y| compare(x,y));
let col_vec = map(tovec(result(bs.$2)), |x| x.$0);
let pivot = map(
col_vec,
|x:%(cty)s|
map(ind_vec, |y:%(ity)s| f64(lookup(agg_dict, {y, x})))
);
{ind_vec, pivot, col_vec}
"""
elif aggfunc == 'mean':
weld_template = """
let bs = for(
%(zip_expr)s,
{dictmerger[{%(ity)s,%(cty)s},{%(vty)s, i64},+], dictmerger[%(ity)s,i64,+], dictmerger[%(cty)s,i64,+]},
|b, i, e| {merge(b.$0, {{e.$%(id)s, e.$%(cd)s}, {e.$%(vd)s, 1L}}),
merge(b.$1, {e.$%(id)s, 1L}),
merge(b.$2, {e.$%(cd)s, 1L})}
);
let agg_dict = result(bs.$0);
let ind_vec = sort(map(tovec(result(bs.$1)), |x| x.$0), |x, y| compare(x,y));
let col_vec = map(tovec(result(bs.$2)), |x| x.$0);
let pivot = map(
col_vec,
|x:%(cty)s|
map(ind_vec, |y:%(ity)s| (
let sum_len_pair = lookup(agg_dict, {y, x});
f64(sum_len_pair.$0) / f64(sum_len_pair.$1))
)
);
{ind_vec, pivot, col_vec}
"""
else:
raise Exception("Aggregate operation %s not supported." % aggfunc)
weld_obj.weld_code = weld_template % {"zip_expr": zip_var,
"ity": index_ty,
"cty": columns_ty,
"vty": value_ty,
"id": index_index,
"cd": columns_index,
"vd": value_index}
return weld_obj
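# Illustrative semantics (hypothetical arguments): with aggfunc='sum' this
# mirrors pandas' pivot_table(values=..., index=..., columns=...,
# aggfunc='sum'), except that the result is a {index_vector, per-column
# value vectors, column_names} triple rather than a DataFrame.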
def get_pivot_column(pivot, column_name, column_type):
weld_obj = WeldObject(encoder_, decoder_)
pivot_var = weld_obj.update(pivot)
if isinstance(pivot, WeldObject):
pivot_var = pivot.obj_id
weld_obj.dependencies[pivot_var] = pivot
column_name_var = weld_obj.update(column_name)
if isinstance(column_name, WeldObject):
column_name_var = column_name.obj_id
weld_obj.dependencies[column_name_var] = column_name
weld_template = """
let col_dict = result(for(
%(piv)s.$2,
dictmerger[%(cty)s,i64,+],
|b, i, e| merge(b, {e, i})
));
{%(piv)s.$0, lookup(%(piv)s.$1, lookup(col_dict, %(colnm)s))}
"""
weld_obj.weld_code = weld_template % {"piv":pivot_var,
"cty":column_type,
"colnm":column_name_var}
return weld_obj
def pivot_sort(pivot, column_name, index_type, column_type, pivot_type):
weld_obj = WeldObject(encoder_, decoder_)
pivot_var = weld_obj.update(pivot)
if isinstance(pivot, WeldObject):
pivot_var = pivot.obj_id
weld_obj.dependencies[pivot_var] = pivot
column_name_var = weld_obj.update(column_name)
if isinstance(column_name, WeldObject):
column_name_var = column_name.obj_id
weld_obj.dependencies[column_name_var] = column_name
# Note the key_column is hardcoded for the movielens workload
# diff is located at the 2nd index.
weld_template = """
let key_col = lookup(%(piv)s.$1, 2L);
let sorted_indices = map(
sort(
result(
for(
key_col,
appender[{%(pvty)s,i64}],
|b,i,e| merge(b, {e,i})
)
),
|x,y| compare(x.$0, y.$0)
),
|x| x.$1
);
let new_piv = map(
%(piv)s.$1,
|x| result(
for(
x,
appender[%(pvty)s],
|b,i,e| merge(b, lookup(x, lookup(sorted_indices, i)))
)
)
);
let new_index = result(
for(
%(piv)s.$0,
appender[%(idty)s],
|b,i,e| merge(b, lookup(%(piv)s.$0, lookup(sorted_indices, i)))
)
);
{new_index, new_piv, %(piv)s.$2}
"""
weld_obj.weld_code = weld_template % {"piv": pivot_var,
"cty": column_type,
"idty": index_type,
"colnm": column_name_var,
"pvty": pivot_type}
return weld_obj
def set_pivot_column(pivot, column_name, item, pivot_type, column_type):
weld_obj = WeldObject(encoder_, decoder_)
pivot_var = weld_obj.update(pivot)
if isinstance(pivot, WeldObject):
pivot_var = pivot.obj_id
weld_obj.dependencies[pivot_var] = pivot
column_name_var = weld_obj.update(column_name)
if isinstance(column_name, WeldObject):
column_name_var = column_name.obj_id
weld_obj.dependencies[column_name_var] = column_name
item_var = weld_obj.update(item)
if isinstance(item, WeldObject):
item_var = item.obj_id
weld_obj.dependencies[item_var] = item
weld_template = """
let pv_cols = for(
%(piv)s.$1,
appender[%(pvty)s],
|b, i, e| merge(b, e)
);
let col_names = for(
%(piv)s.$2,
appender[%(cty)s],
|b, i, e| merge(b, e)
);
let new_pivot_cols = result(merge(pv_cols, %(item)s));
let new_col_names = result(merge(col_names, %(colnm)s));
{%(piv)s.$0, new_pivot_cols, new_col_names}
"""
weld_obj.weld_code = weld_template % {"piv":pivot_var,
"cty":column_type,
"pvty":pivot_type,
"item":item_var,
"colnm":column_name_var}
return weld_obj
def pivot_sum(pivot, value_type):
weld_obj = WeldObject(encoder_, decoder_)
pivot_var = weld_obj.update(pivot)
if isinstance(pivot, WeldObject):
pivot_var = pivot.obj_id
weld_obj.dependencies[pivot_var] = pivot
weld_template = """
result(for(
%(piv)s.$0,
appender[%(pty)s],
|b1,i1,e1|
merge(b1,
result(for(
%(piv)s.$1,
merger[%(pty)s,+],
|b2, i2, e2| merge(b2, lookup(e2, i1))
))
)
))
"""
weld_obj.weld_code = weld_template % {"piv":pivot_var,
"pty":value_type}
return weld_obj
def pivot_div(pivot, series, vec_type, val_type):
weld_obj = WeldObject(encoder_, decoder_)
pivot_var = weld_obj.update(pivot)
if isinstance(pivot, WeldObject):
pivot_var = pivot.obj_id
weld_obj.dependencies[pivot_var] = pivot
series_var = weld_obj.update(series)
if isinstance(series, WeldObject):
series_var = series.obj_id
weld_obj.dependencies[series_var] = series
weld_template = """
{%(piv)s.$0,
map(
%(piv)s.$1,
|v:%(vty)s|
result(for(
v,
appender[f64],
|b, i, e| merge(b, f64(e) / f64(lookup(%(srs)s, i)))
))
),
%(piv)s.$2
}
"""
weld_obj.weld_code = weld_template % {"piv":pivot_var,
"pty":val_type,
"vty":vec_type,
"srs":series_var}
return weld_obj
def groupby_sum(columns, column_tys, grouping_columns, grouping_column_tys):
"""
Groups the given columns by the corresponding grouping column
value, and aggregate by summing values.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
grouping_column (WeldObject): Column to group rest of columns by
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
if len(grouping_columns) == 1 and len(grouping_column_tys) == 1:
grouping_column_var = weld_obj.update(grouping_columns[0])
if isinstance(grouping_columns[0], WeldObject):
grouping_column_var = grouping_columns[0].weld_code
grouping_column_ty_str = "%s" % grouping_column_tys[0]
else:
grouping_column_vars = []
for column in grouping_columns:
column_var = weld_obj.update(column)
if isinstance(column_var, WeldObject):
column_var = column_var.weld_code
grouping_column_vars.append(column_var)
grouping_column_var = ", ".join(grouping_column_vars)
grouping_column_tys = [str(ty) for ty in grouping_column_tys]
grouping_column_ty_str = ", ".join(grouping_column_tys)
grouping_column_ty_str = "{%s}" % grouping_column_ty_str
columns_var_list = []
for column in columns:
column_var = weld_obj.update(column)
if isinstance(column, WeldObject):
column_var = column.obj_id
weld_obj.dependencies[column_var] = column
columns_var_list.append(column_var)
if len(columns_var_list) == 1 and len(grouping_columns) == 1:
columns_var = columns_var_list[0]
tys_str = column_tys[0]
result_str = "merge(b, e)"
elif len(columns_var_list) == 1 and len(grouping_columns) > 1:
columns_var = columns_var_list[0]
tys_str = column_tys[0]
key_str_list = []
for i in xrange(0, len(grouping_columns)):
key_str_list.append("e.$%d" % i)
key_str = "{%s}" % ", ".join(key_str_list)
value_str = "e.$" + str(len(grouping_columns))
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s})" % ", ".join(result_str_list)
elif len(columns_var_list) > 1 and len(grouping_columns) == 1:
columns_var = "%s" % ", ".join(columns_var_list)
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
key_str = "e.$0"
value_str_list = []
for i in xrange(1, len(columns) + 1):
value_str_list.append("e.$%d" % i)
value_str = "{%s}" % ", ".join(value_str_list)
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s})" % ", ".join(result_str_list)
else:
columns_var = "%s" % ", ".join(columns_var_list)
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
key_str_list = []
key_size = len(grouping_columns)
value_size = len(columns)
for i in xrange(0, key_size):
key_str_list.append("e.$%d" % i)
key_str = "{%s}" % ", ".join(key_str_list)
value_str_list = []
for i in xrange(key_size, key_size + value_size):
value_str_list.append("e.$%d" % i)
value_str = "{%s}" % ", ".join(value_str_list)
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s})" % ", ".join(result_str_list)
weld_template = """
tovec(
result(
for(
zip(%(grouping_column)s, %(columns)s),
dictmerger[%(gty)s, %(ty)s, +],
|b, i, e| %(result)s
)
)
)
"""
weld_obj.weld_code = weld_template % {"grouping_column": grouping_column_var,
"columns": columns_var,
"result": result_str,
"ty": tys_str,
"gty": grouping_column_ty_str}
return weld_obj
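# Illustrative semantics (hypothetical inputs): for one grouping column and
# one value column this is the lazy analogue of pandas'
# df.groupby(key)[col].sum(); the generated Weld program materializes a
# vector of {group_key, summed_value} pairs in no particular order.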
def groupby_std(columns, column_tys, grouping_columns, grouping_column_tys):
"""
Groups the given columns by the corresponding grouping column
value, and aggregates each group by computing the standard deviation.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
grouping_column (WeldObject): Column to group rest of columns by
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
if len(grouping_columns) == 1 and len(grouping_column_tys) == 1:
grouping_column_var = weld_obj.update(grouping_columns[0])
if isinstance(grouping_columns[0], WeldObject):
grouping_column_var = grouping_columns[0].weld_code
grouping_column_ty_str = "%s" % grouping_column_tys[0]
else:
grouping_column_vars = []
for column in grouping_columns:
column_var = weld_obj.update(column)
if isinstance(column_var, WeldObject):
column_var = column_var.weld_code
grouping_column_vars.append(column_var)
grouping_column_var = ", ".join(grouping_column_vars)
grouping_column_tys = [str(ty) for ty in grouping_column_tys]
grouping_column_ty_str = ", ".join(grouping_column_tys)
grouping_column_ty_str = "{%s}" % grouping_column_ty_str
columns_var_list = []
for column in columns:
column_var = weld_obj.update(column)
if isinstance(column, WeldObject):
column_var = column.obj_id
weld_obj.dependencies[column_var] = column
columns_var_list.append(column_var)
if len(columns_var_list) == 1 and len(grouping_columns) == 1:
columns_var = columns_var_list[0]
tys_str = column_tys[0]
result_str = "merge(b, e)"
# TODO : The above change needs to apply for all result strings
# These currently break at the moment
elif len(columns_var_list) == 1 and len(grouping_columns) > 1:
columns_var = columns_var_list[0]
tys_str = column_tys[0]
key_str_list = []
for i in xrange(0, len(grouping_columns)):
key_str_list.append("e.$%d" % i)
key_str = "{%s}" % ", ".join(key_str_list)
value_str = "e.$" + str(len(grouping_columns))
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s, 1L})" % ", ".join(result_str_list)
elif len(columns_var_list) > 1 and len(grouping_columns) == 1:
columns_var = "%s" % ", ".join(columns_var_list)
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
key_str = "e.$0"
value_str_list = []
for i in xrange(1, len(columns) + 1):
value_str_list.append("e.$%d" % i)
value_str = "{%s}" % ", ".join(value_str_list)
result_str_list = [key_str, value_str]
result_str = "merge(b, %s)" % ", ".join(result_str_list)
else:
columns_var = "%s" % ", ".join(columns_var_list)
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
key_str_list = []
key_size = len(grouping_columns)
value_size = len(columns)
for i in xrange(0, key_size):
key_str_list.append("e.$%d" % i)
key_str = "{%s}" % ", ".join(key_str_list)
value_str_list = []
for i in xrange(key_size, key_size + value_size):
value_str_list.append("e.$%d" % i)
value_str = "{%s}" % ", ".join(value_str_list)
result_str_list = [key_str, value_str]
result_str = "merge(b, %s)" % ", ".join(result_str_list)
# TODO Need to implement exponent operator
# The mean dict's e.$1.$0 assumes this is a scalar vector and not a struct vector
# Also pandas normalizes by n - 1
weld_template = """
let sum_dict = result(
for(
zip(%(grouping_column)s, %(columns)s),
groupmerger[%(gty)s, %(ty)s],
|b, i, e| %(result)s
)
);
map(
sort(tovec(sum_dict), |x| x.$0),
|e| {e.$0,
(let sum = result(for(e.$1, merger[%(ty)s,+], |b,i,a| merge(b, a)));
let m = f64(sum) / f64(len(e.$1));
let msqr = result(for(e.$1, merger[f64,+], |b,i,a| merge(b, (f64(a)-m)*(f64(a)-m))));
sqrt(msqr / f64(len(e.$1) - 1L))
)}
)
"""
weld_obj.weld_code = weld_template % {"grouping_column": grouping_column_var,
"columns": columns_var,
"result": result_str,
"ty": tys_str,
"gty": grouping_column_ty_str}
return weld_obj
def groupby_size(columns, column_tys, grouping_columns, grouping_column_tys):
"""
Groups the rows by the corresponding grouping column
value, and counts the number of rows in each group.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
grouping_column (WeldObject): Column to group rest of columns by
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
if len(grouping_columns) == 1 and len(grouping_column_tys) == 1:
grouping_column_var = weld_obj.update(grouping_columns[0])
if isinstance(grouping_columns[0], WeldObject):
grouping_column_var = grouping_columns[0].weld_code
grouping_column_ty_str = "%s" % grouping_column_tys[0]
else:
grouping_column_vars = []
for column in grouping_columns:
column_var = weld_obj.update(column)
if isinstance(column_var, WeldObject):
column_var = column_var.weld_code
grouping_column_vars.append(column_var)
grouping_column_var = ", ".join(grouping_column_vars)
grouping_column_var = "zip(%s)" % grouping_column_var
grouping_column_tys = [str(ty) for ty in grouping_column_tys]
grouping_column_ty_str = ", ".join(grouping_column_tys)
grouping_column_ty_str = "{%s}" % grouping_column_ty_str
weld_template = """
let group = sort(
tovec(
result(
for(
%(grouping_column)s,
dictmerger[%(gty)s, i64, +],
|b, i, e| merge(b, {e, 1L})
)
)
),
|x:{%(gty)s, i64}, y:{%(gty)s, i64}| compare(x.$0, y.$0)
);
{
map(
group,
|x:{%(gty)s,i64}| x.$0
),
map(
group,
|x:{%(gty)s,i64}| x.$1
)
}
"""
weld_obj.weld_code = weld_template % {"grouping_column": grouping_column_var,
"gty": grouping_column_ty_str}
return weld_obj
def groupby_sort(columns, column_tys, grouping_columns, grouping_column_tys, key_index, ascending):
"""
Groups the given columns by the corresponding grouping column
value, and sorts the values within each group (optionally descending).
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
grouping_column (WeldObject): Column to group rest of columns by
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
if len(grouping_columns) == 1 and len(grouping_column_tys) == 1:
grouping_column_var = weld_obj.update(grouping_columns[0])
if isinstance(grouping_columns[0], WeldObject):
grouping_column_var = grouping_columns[0].weld_code
grouping_column_ty_str = "%s" % grouping_column_tys[0]
else:
grouping_column_vars = []
for column in grouping_columns:
column_var = weld_obj.update(column)
if isinstance(column, WeldObject):
column_var = column.obj_id
weld_obj.dependencies[column_var] = column
grouping_column_vars.append(column_var)
grouping_column_var = ", ".join(grouping_column_vars)
grouping_column_tys = [str(ty) for ty in grouping_column_tys]
grouping_column_ty_str = ", ".join(grouping_column_tys)
grouping_column_ty_str = "{%s}" % grouping_column_ty_str
columns_var_list = []
for column in columns:
column_var = weld_obj.update(column)
if isinstance(column, WeldObject):
column_var = column.obj_id
weld_obj.dependencies[column_var] = column
columns_var_list.append(column_var)
if len(columns_var_list) == 1 and len(grouping_columns) == 1:
columns_var = columns_var_list[0]
tys_str = column_tys[0]
result_str = "merge(b, e)"
elif len(columns_var_list) == 1 and len(grouping_columns) > 1:
columns_var = columns_var_list[0]
tys_str = column_tys[0]
key_str_list = []
for i in xrange(0, len(grouping_columns)):
key_str_list.append("e.$%d" % i)
key_str = "{%s}" % ", ".join(key_str_list)
value_str = "e.$" + str(len(grouping_columns))
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s})" % ", ".join(result_str_list)
elif len(columns_var_list) > 1 and len(grouping_columns) == 1:
columns_var = "%s" % ", ".join(columns_var_list)
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
key_str = "e.$0"
value_str_list = []
for i in xrange(1, len(columns) + 1):
value_str_list.append("e.$%d" % i)
value_str = "{%s}" % ", ".join(value_str_list)
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s})" % ", ".join(result_str_list)
else:
columns_var = "%s" % ", ".join(columns_var_list)
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
key_str_list = []
key_size = len(grouping_columns)
value_size = len(columns)
for i in xrange(0, key_size):
key_str_list.append("e.$%d" % i)
key_str = "{%s}" % ", ".join(key_str_list)
value_str_list = []
for i in xrange(key_size, key_size + value_size):
value_str_list.append("e.$%d" % i)
value_str = "{%s}" % ", ".join(value_str_list)
result_str_list = [key_str, value_str]
result_str = "merge(b, {%s})" % ", ".join(result_str_list)
if key_index == None:
key_str_x = "x"
key_str_y = "y"
else :
key_str_x = "x.$%d" % key_index
key_str_y = "y.$%d" % key_index
if ascending == False:
# negate both sort keys so compare() yields a descending order
key_str_x = key_str_x + "* %s(-1)" % column_tys[key_index]
key_str_y = key_str_y + "* %s(-1)" % column_tys[key_index]
weld_template = """
@(grain_size:1)map(
tovec(
result(
for(
zip(%(grouping_column)s, %(columns)s),
groupmerger[%(gty)s, %(ty)s],
|b, i, e| %(result)s
)
)
),
|a:{%(gty)s, vec[%(ty)s]}| {a.$0, sort(a.$1, |x:%(ty)s, y:%(ty)s| compare(%(key_str_x)s, %(key_str_y)s))}
)
"""
weld_obj.weld_code = weld_template % {"grouping_column": grouping_column_var,
"columns": columns_var,
"result": result_str,
"ty": tys_str,
"gty": grouping_column_ty_str,
"key_str_x": key_str_x,
"key_str_y": key_str_y}
return weld_obj
def flatten_group(expr, column_tys, grouping_column_tys):
"""
Flattens a grouped (key, vector-of-values) result back into
flat columns of keys and corresponding values.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
grouping_column (WeldObject): Column to group rest of columns by
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
group_var = weld_obj.update(expr)
num_group_cols = len(grouping_column_tys)
if num_group_cols == 1:
grouping_column_ty_str = "%s" % grouping_column_tys[0]
grouping_column_key_str = "a.$0"
else:
grouping_column_tys = [str(ty) for ty in grouping_column_tys]
grouping_column_ty_str = ", ".join(grouping_column_tys)
grouping_column_ty_str = "{%s}" % grouping_column_ty_str
grouping_column_keys = ["a.$0.$%s" % i for i in range(0, num_group_cols)]
grouping_column_key_str = "{%s}" % ", ".join(grouping_column_keys)
if len(column_tys) == 1:
tys_str = "%s" % column_tys[0]
column_values_str = "b.$1"
else:
column_tys = [str(ty) for ty in column_tys]
tys_str = "{%s}" % ", ".join(column_tys)
column_values = ["b.$%s" % i for i in range(0, len(column_tys))]
column_values_str = "{%s}" % ", ".join(column_values)
# TODO: The output in pandas keeps the keys sorted (even if keys are structs)
# We need to allow key functions in sort-by clauses to be able to compare structs.
# sort(%(group_vec)s, |x:{%(gty)s, vec[%(ty)s]}| x.$0),
weld_template = """
flatten(
map(
%(group_vec)s,
|a:{%(gty)s, vec[%(ty)s]}| map(a.$1, |b:%(ty)s| {%(gkeys)s, %(gvals)s})
)
)
"""
weld_obj.weld_code = weld_template % {"group_vec": expr.weld_code,
"gkeys": grouping_column_key_str,
"gvals": column_values_str,
"ty": tys_str,
"gty": grouping_column_ty_str}
return weld_obj
def grouped_slice(expr, type, start, size):
"""
Slices each group's value vector, keeping `size` elements
starting at index `start`.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
grouping_column (WeldObject): Column to group rest of columns by
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
weld_obj.update(expr)
weld_template = """
map(
%(vec)s,
|b: %(type)s| {b.$0, slice(b.$1, i64(%(start)s), i64(%(size)s))}
)
"""
weld_obj.weld_code = weld_template % {"vec": expr.weld_code,
"type": type,
"start": str(start),
"size": str(size)}
return weld_obj
def get_column(columns, column_tys, index):
"""
Get column corresponding to passed-in index from ptr returned
by groupBySum.
Args:
columns (List<WeldObject>): List of columns as WeldObjects
column_tys (List<str>): List of each column data ty
index (int): index of selected column
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
columns_var = weld_obj.update(columns, tys=WeldVec(column_tys), override=False)
if isinstance(columns, WeldObject):
columns_var = columns.obj_id
weld_obj.dependencies[columns_var] = columns
weld_template = """
map(
%(columns)s,
|elem: %(ty)s| elem.$%(index)s
)
"""
weld_obj.weld_code = weld_template % {"columns": columns_var,
"ty": column_tys,
"index": index}
return weld_obj
| bsd-3-clause |
rsivapr/scikit-learn | sklearn/semi_supervised/label_propagation.py | 8 | 14061 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be expensive to
run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
if sparse.isspmatrix(X):
self.X_ = X
else:
self.X_ = np.asarray(X)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
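# Iterative update: Y <- clamp_weights * (graph_matrix . Y) + y_static,
# where clamp_weights is alpha for unlabeled points and 1 for labeled ones,
# and y_static is (1 - alpha) times the original label distributions
# (zero for unlabeled points). Iteration stops at convergence or when the
# iteration budget is exhausted.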
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
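# Note on the graph built above: the normalized Laplacian is
#     L = I - D^{-1/2} W D^{-1/2},
# so negating it and zeroing the diagonal leaves the off-diagonal part of
# D^{-1/2} W D^{-1/2}.  LabelSpreading therefore runs the same propagation loop
# as LabelPropagation, but on a symmetrically normalized graph, which is what
# makes the soft clamping of Zhou et al. (2004) well behaved.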
| bsd-3-clause |
solin319/incubator-mxnet | example/gan/dcgan.py | 24 | 10758 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from matplotlib import pyplot as plt
import logging
import cv2
from datetime import datetime
def make_dcgan_sym(ngf, ndf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
BatchNorm = mx.sym.BatchNorm
rand = mx.sym.Variable('rand')
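# Generator: a (Z, 1, 1) noise vector is upsampled 1 -> 4 -> 8 -> 16 -> 32 -> 64
# by one unstrided 4x4 deconvolution followed by four stride-2 deconvolutions,
# ending in a tanh so outputs lie in [-1, 1] like the preprocessed images.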
g1 = mx.sym.Deconvolution(rand, name='g1', kernel=(4,4), num_filter=ngf*8, no_bias=no_bias)
gbn1 = BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=eps)
gact1 = mx.sym.Activation(gbn1, name='gact1', act_type='relu')
g2 = mx.sym.Deconvolution(gact1, name='g2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf*4, no_bias=no_bias)
gbn2 = BatchNorm(g2, name='gbn2', fix_gamma=fix_gamma, eps=eps)
gact2 = mx.sym.Activation(gbn2, name='gact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='g3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf*2, no_bias=no_bias)
gbn3 = BatchNorm(g3, name='gbn3', fix_gamma=fix_gamma, eps=eps)
gact3 = mx.sym.Activation(gbn3, name='gact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='g4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf, no_bias=no_bias)
gbn4 = BatchNorm(g4, name='gbn4', fix_gamma=fix_gamma, eps=eps)
gact4 = mx.sym.Activation(gbn4, name='gact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='g5', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=nc, no_bias=no_bias)
gout = mx.sym.Activation(g5, name='gact5', act_type='tanh')
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
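# Discriminator: mirrors the generator with stride-2 convolutions
# 64 -> 32 -> 16 -> 8 -> 4, then a final 4x4 convolution to a single logit
# fed to LogisticRegressionOutput.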
d1 = mx.sym.Convolution(data, name='d1', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*2, no_bias=no_bias)
dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*4, no_bias=no_bias)
dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
d4 = mx.sym.Convolution(dact3, name='d4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*8, no_bias=no_bias)
dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
d5 = mx.sym.Convolution(dact4, name='d5', kernel=(4,4), num_filter=1, no_bias=no_bias)
d5 = mx.sym.Flatten(d5)
dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
return gout, dloss
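# A quick shape sanity check one can run (sketch only; the shapes below assume
# the batch_size=64 and Z=100 defaults used in __main__):
#
#     >>> symG, symD = make_dcgan_sym(ngf=64, ndf=64, nc=3)
#     >>> _, out_shapes, _ = symG.infer_shape(rand=(64, 100, 1, 1))
#     >>> out_shapes
#     [(64, 3, 64, 64)]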
def get_mnist():
mnist = fetch_mldata('MNIST original')
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p]
X = X.reshape((70000, 28, 28))
X = np.asarray([cv2.resize(x, (64,64)) for x in X])
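# Scale pixel values from [0, 255] to [-1, 1] to match the generator's tanh output.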
X = X.astype(np.float32)/(255.0/2) - 1.0
X = X.reshape((70000, 1, 64, 64))
X = np.tile(X, (1, 3, 1, 1))
X_train = X[:60000]
X_test = X[60000:]
return X_train, X_test
class RandIter(mx.io.DataIter):
def __init__(self, batch_size, ndim):
self.batch_size = batch_size
self.ndim = ndim
self.provide_data = [('rand', (batch_size, ndim, 1, 1))]
self.provide_label = []
def iter_next(self):
return True
def getdata(self):
return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]
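# RandIter is an endless source of N(0, 1) noise batches shaped like the
# generator's 'rand' input; e.g. (sketch) ``RandIter(64, 100).next()`` yields a
# DataBatch whose single array has shape (64, 100, 1, 1).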
class ImagenetIter(mx.io.DataIter):
def __init__(self, path, batch_size, data_shape):
self.internal = mx.io.ImageRecordIter(
path_imgrec = path,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True,
max_crop_size = 256,
min_crop_size = 192)
self.provide_data = [('data', (batch_size,) + data_shape)]
self.provide_label = []
def reset(self):
self.internal.reset()
def iter_next(self):
return self.internal.iter_next()
def getdata(self):
data = self.internal.getdata()
data = data * (2.0/255.0)
data -= 1
return [data]
def fill_buf(buf, i, img, shape):
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
def visual(title, X):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = cv2.cvtColor(buff, cv2.COLOR_BGR2RGB)
plt.imshow(buff)
plt.title(title)
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# =============setting============
dataset = 'mnist'
imgnet_path = './train.rec'
ndf = 64
ngf = 64
nc = 3
batch_size = 64
Z = 100
lr = 0.0002
beta1 = 0.5
ctx = mx.gpu(0)
check_point = False
symG, symD = make_dcgan_sym(ngf, ndf, nc)
#mx.viz.plot_network(symG, shape={'rand': (batch_size, 100, 1, 1)}).view()
#mx.viz.plot_network(symD, shape={'data': (batch_size, nc, 64, 64)}).view()
# ==============data==============
if dataset == 'mnist':
X_train, X_test = get_mnist()
train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size)
elif dataset == 'imagenet':
train_iter = ImagenetIter(imgnet_path, batch_size, (3, 64, 64))
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=ctx)
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
})
mods = [modG]
# =============module D=============
modD = mx.mod.Module(symbol=symD, data_names=('data',), label_names=('label',), context=ctx)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
})
mods.append(modD)
# ============printing==============
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
if mon is not None:
for mod in mods:
mod.install_monitor(mon)
def facc(label, pred):
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
def fentropy(label, pred):
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
mG = mx.metric.CustomMetric(fentropy)
mD = mx.metric.CustomMetric(fentropy)
mACC = mx.metric.CustomMetric(facc)
print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(100):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
# update discriminator on fake
label[:] = 0
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
#modD.update()
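# Instead of updating D on the fake batch alone, stash a copy of its gradients;
# they are added to the real-batch gradients below so D takes a single step on
# the combined fake+real gradient.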
gradD = [[grad.copyto(grad.context) for grad in grads] for grads in modD._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
label[:] = 1
batch.label = [label]
modD.forward(batch, is_train=True)
modD.backward()
for gradsr, gradsf in zip(modD._exec_group.grad_arrays, gradD):
for gradr, gradf in zip(gradsr, gradsf):
gradr += gradf
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update generator
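# Non-saturating generator update: present the fake batch to D with "real"
# labels, backprop through D to get gradients w.r.t. its input, and push those
# gradients back through G.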
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD.get_input_grads()
modG.backward(diffD)
modG.update()
mG.update([label], modD.get_outputs())
if mon is not None:
mon.toc_print()
t += 1
if t % 10 == 0:
print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get())
mACC.reset()
mG.reset()
mD.reset()
visual('gout', outG[0].asnumpy())
diff = diffD[0].asnumpy()
diff = (diff - diff.mean())/diff.std()
visual('diff', diff)
visual('data', batch.data[0].asnumpy())
if check_point:
print('Saving...')
modG.save_params('%s_G_%s-%04d.params'%(dataset, stamp, epoch))
modD.save_params('%s_D_%s-%04d.params'%(dataset, stamp, epoch))
| apache-2.0 |
kaichogami/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
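# A possible follow-up (sketch, not part of the original example): keep only the
# most informative features using the ranking computed above, e.g.
#
#     X_top = X[:, indices[:3]]   # the 3 highest-importance columns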
| bsd-3-clause |